RcppEigen/inst/examples/lmBenchmark.R

## lmBenchmark.R: Benchmark different implementations of linear model solutions
##
## Copyright (C) 2011 Douglas Bates, Dirk Eddelbuettel and Romain Francois
##
## This file is part of RcppEigen.

require("stats", character=TRUE, quietly=TRUE)
require("rbenchmark", character=TRUE, quietly=TRUE)
require("RcppEigen", character=TRUE, quietly=TRUE)

## define different versions of lm
exprs <- list()

## These versions use rank-revealing decompositions and thus can
## handle rank-deficient cases.

# default version used in lm()
exprs$lm.fit <- expression(stats::lm.fit(mm, y))

# versions from RcppEigen
## column-pivoted QR decomposition - similar to lm.fit
exprs$PivQR <- expression(.Call("fastLm", mm, y, 0L, PACKAGE="RcppEigen"))
## LDLt Cholesky decomposition with rank detection
exprs$LDLt <- expression(.Call("fastLm", mm, y, 2L, PACKAGE="RcppEigen"))
## SVD using the Lapack subroutine dgesdd and Eigen support
exprs$GESDD <- expression(.Call("fastLm", mm, y, 6L, PACKAGE="RcppEigen"))
## SVD (the JacobiSVD class from Eigen)
exprs$SVD <- expression(.Call("fastLm", mm, y, 4L, PACKAGE="RcppEigen"))
## eigenvalues and eigenvectors of X'X
exprs$SymmEig <- expression(.Call("fastLm", mm, y, 5L, PACKAGE="RcppEigen"))

## Non-rank-revealing decompositions. These work fine except when
## they don't.

## Unpivoted QR decomposition
exprs$QR <- expression(.Call("fastLm", mm, y, 1L, PACKAGE="RcppEigen"))
## LLt Cholesky decomposition
exprs$LLt <- expression(.Call("fastLm", mm, y, 3L, PACKAGE="RcppEigen"))

if (suppressMessages(require("RcppArmadillo", character=TRUE, quietly=TRUE))) {
    exprs$arma <- expression(.Call("RcppArmadillo_fastLm", mm, y,
                                   PACKAGE="RcppArmadillo"))
}

if (suppressMessages(require("RcppGSL", character=TRUE, quietly=TRUE))) {
    exprs$GSL <- expression(.Call("fastLm", mm, y, PACKAGE="RcppGSL"))
}

do_bench <- function(n=100000L, p=40L, nrep=20L, suppressSVD=(n > 100000L)) {
    mm <- cbind(1, matrix(rnorm(n * (p - 1L)), nc=p-1L))
    y <- rnorm(n)
    if (suppressSVD) exprs <- exprs[!names(exprs) %in% c("SVD", "GSL")]
    cat("lm benchmark for n = ", n, " and p = ", p, ": nrep = ", nrep, "\n", sep='')
    do.call(benchmark, c(exprs,
                         list(order="relative",
                              columns = c("test", "relative", "elapsed",
                                          "user.self", "sys.self"),
                              replications = nrep)))
}

print(do_bench())
sessionInfo()

.Call("eigen_version", FALSE, PACKAGE="RcppEigen")
.Call("Eigen_SSE", FALSE, PACKAGE="RcppEigen")

RcppEigen/inst/CITATION

citHeader("To cite RcppEigen in publications use:")

citEntry(entry = "Article",
         title = "Fast and Elegant Numerical Linear Algebra Using the {RcppEigen} Package",
         author = personList(as.person("Douglas Bates"),
                             as.person("Dirk Eddelbuettel")),
         journal = "Journal of Statistical Software",
         year = "2013",
         volume = "52",
         number = "5",
         pages = "1--24",
         url = "http://www.jstatsoft.org/v52/i05/",
         textVersion = paste("Douglas Bates, Dirk Eddelbuettel (2013).",
                             "Fast and Elegant Numerical Linear Algebra Using the RcppEigen Package.",
                             "Journal of Statistical Software, 52(5), 1-24.",
                             "URL http://www.jstatsoft.org/v52/i05/."))

RcppEigen/inst/NEWS.Rd
\name{NEWS}
\title{News for Package 'RcppEigen'}
\newcommand{\ghpr}{\href{https://github.com/RcppCore/RcppEigen/pull/#1}{##1}}
\newcommand{\ghit}{\href{https://github.com/RcppCore/RcppEigen/issues/#1}{##1}}

\section{Changes in RcppEigen version 0.3.2.7.0 (2016-01-18)}{
  \itemize{
    \item Updated to version 3.2.7 of Eigen
    \item The fastLm example will not include the Lapack header if MKL is
      defined (thanks to PR \ghpr{25} by Alexey Stukalow)
  }
}

\section{Changes in RcppEigen version 0.3.2.5.1 (2015-09-23)}{
  \itemize{
    \item Corrected use of \code{kitten()} thanks to Grant Brown (#21)
    \item Applied upstream change to protect against undefined behaviour
      with null pointers
  }
}

\section{Changes in RcppEigen version 0.3.2.5.0 (2015-07-11)}{
  \itemize{
    \item Updated to version 3.2.5 of Eigen
    \item Fixed some incorrect error messages stemming from Eigen code.
    \item Updated package to current \code{R CMD check} standards.
  }
}

\section{Changes in RcppEigen version 0.3.2.4.0 (2015-02-23)}{
  \itemize{
    \item Updated to version 3.2.4 of Eigen
    \item Update \code{RcppEigen.package.skeleton()} to use \CRANpkg{pkgKitten}
      if available
  }
}

\section{Changes in RcppEigen version 0.3.2.3.0 (2014-12-22)}{
  \itemize{
    \item Updated to version 3.2.3 of Eigen
    \item Added a number of additional unit tests for \code{wrap},
      \code{transform} and \code{sparse}
    \item Updated one of the examples
  }
}

\section{Changes in RcppEigen version 0.3.2.2.0 (2014-08-19)}{
  \itemize{
    \item Updated to version 3.2.2 of Eigen
    \item \code{Rcpp::as()} now supports the conversion from R vector to
      \dQuote{row array}, i.e., \code{Eigen::Array}
    \item \code{Rcpp::as()} now supports the conversion from \code{dgRMatrix}
      (row oriented sparse matrices, defined in \pkg{Matrix} package) to
      \code{Eigen::MappedSparseMatrix}
    \item Conversion from R matrix to \code{Eigen::MatrixXd} and
      \code{Eigen::ArrayXXd} using \code{Rcpp::as()} no longer gives
      compilation errors
  }
}

\section{Changes in RcppEigen version 0.3.2.1.2 (2014-05-05)}{
  \itemize{
    \item Applied upstream patch to not trigger g++ UBSAN warning on the
      cpu id comparison, with thanks to Gael Guennebaud for the patch
  }
}

\section{Changes in RcppEigen version 0.3.2.1.1 (2014-03-06)}{
  \itemize{
    \item Better \code{ifdef} on one directory entry feature, with thanks
      to Brian Ripley.
  }
}

\section{Changes in RcppEigen version 0.3.2.1.0 (2014-03-03)}{
  \itemize{
    \item Updated to version 3.2.1 of Eigen
  }
}

\section{Changes in RcppEigen version 0.3.2.0.3 (2014-03-01)}{
  \itemize{
    \item Updated and extended \code{RcppEigen.package.skeleton()} to use
      several examples via \CRANpkg{Rcpp} Attributes; also removed the
      deprecated \code{namespace} argument
    \item Updated skeleton package example for \CRANpkg{Rcpp} 0.11.0 or later
      by removing the need for linking with the user library
    \item Updated files \code{DESCRIPTION}, \code{NAMESPACE},
      \code{src/Makevars} and \code{src/Makevars.win} similarly
  }
}

\section{Changes in RcppEigen version 0.3.2.0.2 (2014-01-26)}{
  \itemize{
    \item Converted three unused unit test files to \CRANpkg{RUnit} and
      removed \code{Suggests:} of \CRANpkg{testthat}
    \item Add declaration to import a symbol from \CRANpkg{Rcpp} to
      \code{NAMESPACE} to ensure proper instantiation with the upcoming
      \CRANpkg{Rcpp} version
    \item Retire \code{SHLIB.maker} function
  }
}

\section{Changes in RcppEigen version 0.3.2.0.1 (2013-12-18)}{
  \itemize{
    \item New maintainer -- with a big thanks to Doug for all his work
    \item Applied two small patches to deal with non-g++ compilers
    \item Clarifications concerning license and authorship of Eigen (as
      opposed to RcppEigen) code added to \code{DESCRIPTION} at the request
      of CRAN
  }
}

\section{Changes in RcppEigen version 0.3.2.0 (2013-11-13)}{
  \itemize{
    \item Update to version 3.2.0 of Eigen
  }
}

\section{Changes in RcppEigen version 0.3.1.2.3 (2013-10-25)}{
  \itemize{
    \item Fix to RcppEigenCholmod.h to incorporate changes in the
      cholmod_factor struct. These changes are necessary if code compiled
      against RcppEigen that uses CHOLMOD factors is to be run with versions
      of the Matrix package >= 1.1-0
  }
}

\section{Changes in RcppEigen version 0.3.1.2 (2012-11-29)}{
  \itemize{
    \item Upgraded to Eigen 3.1.2
    \item Fixes to RcppEigenWrap.h and adjustment of tests accordingly. The
      changes allow RowMajor matrices to be wrapped (thanks to Gael
      Guennebaud) but cannot handle RowVector types. There will need to be
      more template metaprogramming done to redirect the case of RowVector,
      which cannot be changed to a ColMajor form.
    \item Because of changes in R, -DNDEBUG is automatic. One must override
      it with -UNDEBUG in the local ~/.R/Makevars to activate the debugging
      code.
    \item New (unexported) functions CxxFlags() and RcppEigenCxxFlags() for
      use in Makefiles
    \item Fixes related to Rcpp 0.10.*
  }
}

\section{Changes in RcppEigen version 0.3.1 (2012-08-07)}{
  \itemize{
    \item Upgraded to Eigen 3.1.0
    \item Removed the "unsupported" Eigen module AutoDiff which defined a
      macro "sign" that conflicted with a function in the R API (which
      really should be visible as "Rf_sign", not sure why it shows up as
      "sign" and don't have time to investigate)
    \item Commented out several tests involving complex vectors and
      matrices. Again there are compilation problems related to conflicting
      definitions in the std:: namespace and the R API and Eigen, which I
      don't have time to investigate.
  }
}

\section{Changes in RcppEigen version 0.2.0 (2012-03-12)}{
  \itemize{
    \item Upgraded the version of Eigen to 3.1.0-alpha2, in which the sparse
      matrix modules are now in the "supported" tree.
    \item Added several "unsupported" Eigen modules including
    \itemize{
      \item AutoDiff (a small automatic differentiation package adapted to
        vectors and matrices)
      \item IterativeSolvers (iterative linear and nonlinear solver
        algorithms)
      \item KroneckerProduct (as the name implies)
      \item MatrixFunctions (matrix cos, exp, log, sin, sinh, etc.)
      \item NonlinearOptimization (based on minpack but uses reverse
        communication - yay!)
      \item NumericalDiff (numerical differentiation of vector-valued or
        matrix-valued functions)
      \item Polynomials (polynomial representation and solution using a QR
        algorithm)
      \item Skyline (sparse skyline matrices useful in finite-element codes)
      \item SparseExtra (dynamic sparse matrices, now deprecated, and Matrix
        Market I/O functions)
      \item Splines (multidimensional spline representations and spline
        interpolation)
    }
    \item At present all these modules, including the MatrixFunctions module,
      are included with RcppEigen.h but that may change if too many people
      get unexpected results from A.exp()
    \item The ability to wrap RowMajor sparse matrices and to use as etc.
    \item Migrated some tests to the testthat package. Currently there is
      some difficulty with combining testthat, inline and R CMD check.
  }
}

RcppEigen/inst/COPYRIGHTS

Files: inst/include/Eigen/* inst/include/unsupported/Eigen/* Copyright: 2006 - 2012 Gael Guennebaud, Benoit Jacob License: MPL-2 Files: unsupported/Eigen/FFT unsupported/Eigen/src/FFT/ei_fftw_impl.h unsupported/Eigen/src/FFT/ei_kissfft_impl.h Copyright: 2003 - 2009 Mark Borgerding License: MPL-2 Files: inst/include/Eigen/src/SuperLUSupport/SuperLUSupport.h Copyright: 1994 Xerox Corporation License: THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK. Files: inst/include/unsupported/Eigen/src/IterativeSolvers/IterationController.h Copyright: 1997 - 2001 Andrew Lumsdaine and Lie-Quan Lee License: This file is part of the Iterative Template Library You should have received a copy of the License Agreement for the Iterative Template Library along with the software; see the file LICENSE. Permission to modify the code and to distribute modified code is granted, provided the text of this NOTICE is retained, a notice that the code was modified is included with the above COPYRIGHT NOTICE and with the COPYRIGHT NOTICE in the LICENSE file, and that the LICENSE file is distributed with the modified code. LICENSOR MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. By way of example, but not limitation, Licensor MAKES NO REPRESENTATIONS OR WARRANTIES OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE LICENSED SOFTWARE COMPONENTS OR DOCUMENTATION WILL NOT INFRINGE ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS.
Files: inst/include/unsupported/Eigen/Splines inst/include/unsupported/Eigen/src/Splines/SplineFitting.h inst/include/unsupported/Eigen/src/Splines/SplineFwd.h inst/include/unsupported/Eigen/src/Splines/Spline.h inst/include/Eigen/src/Geometry/Umeyama.h inst/include/Eigen/src/StlSupport/details.h inst/include/Eigen/src/StlSupport/StdDeque.h inst/include/Eigen/src/StlSupport/StdList.h inst/include/Eigen/src/StlSupport/StdVector.h inst/include/Eigen/StdDeque inst/include/Eigen/StdList inst/include/Eigen/StdVector inst/include/Eigen/src/Core/DenseStorage.h inst/include/Eigen/src/Core/util/Memory.h inst/include/Eigen/src/Geometry/Transform.h Copyright: 2009 - 2011 Hauke Heibel License: MPL-2 Files: inst/include/Eigen/src/LU/arch/Inverse_SSE.h inst/include/Eigen/src/Cholesky/LLT_MKL.h inst/include/Eigen/src/Core/Assign_MKL.h inst/include/Eigen/src/Core/products/GeneralMatrixMatrix_MKL.h inst/include/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_MKL.h inst/include/Eigen/src/Core/products/GeneralMatrixVector_MKL.h inst/include/Eigen/src/Core/products/SelfadjointMatrixMatrix_MKL.h inst/include/Eigen/src/Core/products/SelfadjointMatrixVector_MKL.h inst/include/Eigen/src/Core/products/TriangularMatrixMatrix_MKL.h inst/include/Eigen/src/Core/products/TriangularMatrixVector_MKL.h inst/include/Eigen/src/Core/products/TriangularSolverMatrix_MKL.h inst/include/Eigen/src/Core/util/MKL_support.h inst/include/Eigen/src/Eigenvalues/ComplexSchur_MKL.h inst/include/Eigen/src/Eigenvalues/RealSchur_MKL.h inst/include/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_MKL.h inst/include/Eigen/src/LU/PartialPivLU_MKL.h inst/include/Eigen/src/PardisoSupport/PardisoSupport.h inst/include/Eigen/src/QR/ColPivHouseholderQR_MKL.h inst/include/Eigen/src/QR/HouseholderQR_MKL.h inst/include/Eigen/src/SVD/JacobiSVD_MKL.h Copyright: 2001 Intel Corporation License: Permition is granted to use, copy, distribute and prepare derivative works of this library for any purpose and without fee, provided, that the above copyright notice and this statement appear in all copies. Intel makes no representations about the suitability of this software for any purpose, and specifically disclaims all warranties. Files: inst/include/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h Copyright: 2002 - 2007 Yves Renard License: LPGL-2.1 Files: inst/include/Eigen/src/OrderingMethods/Amd.h Copyright: 2006 Timothy A. 
Davis License: LGPL-2.1 Files: inst/include/Eigen/src/Core/arch/SSE/MathFunctions.h Copyright: 2007 Julien Pommier License: MPL-2 Files: inst/include/Eigen/src/Core/Assign.h Copyright: 2007 Michael Olbrich License: MPL-2 Files: inst/include/unsupported/Eigen/src/Skyline/SkylineMatrixBase.h inst/include/unsupported/Eigen/src/Skyline/SkylineMatrix.h inst/include/unsupported/Eigen/src/Skyline/SkylineProduct.h inst/include/unsupported/Eigen/src/Skyline/SkylineStorage.h inst/include/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h inst/include/unsupported/Eigen/src/Skyline/SkylineUtil.h Copyright: 2008 - 2009 Guillaume Saupin ; License: MPL-2 Files: inst/include/Eigen/src/Core/arch/AltiVec/PacketMath.h inst/include/Eigen/src/Core/arch/NEON/PacketMath.h Copyright: 2008 Konstantinos Margaritis License: MPL-2 Files: inst/include/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h inst/include/unsupported/Eigen/src/MatrixFunction.h inst/include/unsupported/Eigen/MatrixFunctions inst/include/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h inst/include/Eigen/src/Eigenvalues/ComplexEigenSolver.h inst/include/Eigen/src/Eigenvalues/ComplexSchur.h inst/include/Eigen/src/Eigenvalues/EigenSolver.h inst/include/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h inst/include/Eigen/src/Eigenvalues/HessenbergDecomposition.h inst/include/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h inst/include/Eigen/src/Eigenvalues/RealSchur.h inst/include/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h inst/include/Eigen/src/Eigenvalues/Tridiagonalization.h inst/include/unsupported/Eigen/src/MatrixFunctions/StemFunction.h inst/include/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h inst/include/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h Copyright: 2009 - 2010 Jitse Niesen License: MPL-2 Files: inst/include/Eigen/src/Eigenvalues/ComplexEigenSolver.h inst/include/Eigen/src/Eigenvalues/ComplexSchur.h Copyright: 2009 Claire Maurice License: MPL-2 Files: inst/include/unsupported/Eigen/BVH inst/include/unsupported/Eigen/src/BVH/BVAlgorithms.h inst/include/unsupported/Eigen/src/BVH/KdBVH.h Copyright: 2009 Ilya Baran License: MPL-2 Files: inst/include/Eigen/src/Cholesky/LDLT.h Copyright: 2009 Keir Mierle License: MPL-2 Files: inst/include/Eigen/src/Geometry/Quaternion.h Copyright: 2009 Mathieu Gautier License: MPL-2 Files: inst/include/Eigen/src/Geometry/arch/Geometry_SSE.h inst/include/unsupported/Eigen/src/MoreVectorization/MathFunctions.h Copyright: 2009 Rohit Garg License: MPL-2 Files: inst/include/unsupported/Eigen/NonLinearOptimization inst/include/unsupported/Eigen/NumericalDiff inst/include/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h inst/include/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h inst/include/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h inst/include/Eigen/src/Core/util/Memory.h Copyright: 2009 - 2010 Thomas Capricelli License: MPL-2 Files: inst/include/Eigen/src/SparseCore/SparseView.h inst/include/Eigen/src/Sparse/SparseView.h Copyright: 2010 Daniel Lowengrub License: MPL-2 Files: inst/include/unsupported/Eigen/src/Polynomials/Companion.h inst/include/unsupported/Eigen/src/Polynomials/PolynomialSolver.h inst/include/unsupported/Eigen/src/Polynomials/PolynomialUtils.h Copyright: 2010 Manuel Yguel License: MPL-2 Files: inst/include/Eigen/src/Householder/BlockHouseholder.h inst/include/Eigen/src/QR/HouseholderQR.h Copyright: 2010 Vincent Lejeune License: MPL-2 Files: 
inst/include/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h Copyright: 2011 Andreas Platen License: MPL-2 Files: inst/include/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h inst/include/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h Copyright: 2011 Chen-Pang He License: MPL-2 Files: inst/include/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h inst/include/unsupported/Eigen/src/IterativeSolvers/GMRES.h Copyright: 2012 Kolja Brix License: MPL-2 Files: inst/include/Eigen/src/Cholesky/LDLT.h Copyright: 2011 Timothy E. Holy License: MPL-2 Files: inst/include/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h inst/include/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h inst/include/Eigen/src/PaStiXSupport/PaStiXSupport.h inst/include/unsupported/Eigen/src/IterativeSolvers/Scaling.h inst/include/unsupported/Eigen/src/SparseExtra/MarketIO.h inst/include/unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h Copyright: 2012 Désiré Nuentsa-Wakam License: MPL-2 Files: * Copyright: 2011 - 2013 Douglas Bates, Romain Francois and Dirk Eddelbuettel License: GPL-2 Files: debian/* Copyright: 2013 Dirk Eddelbuettel License: GPL-2 GNU R ships with various open source licenses including - the GPL license (version 2), use RShowDoc("GPL-2") to display it - the LGPL license (version 2.1), use RShowDoc("LGPL-2.1") to display it The MPL-2 is included below. License: MPL-2 Mozilla Public License Version 2.0 ================================== . 1. Definitions -------------- . 1.1. "Contributor" means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software. . 1.2. "Contributor Version" means the combination of the Contributions of others (if any) used by a Contributor and that particular Contributor's Contribution. . 1.3. "Contribution" means Covered Software of a particular Contributor. . 1.4. "Covered Software" means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof. . 1.5. "Incompatible With Secondary Licenses" means . (a) that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or . (b) that the Covered Software was made available under the terms of version 1.1 or earlier of the License, but not also under the terms of a Secondary License. . 1.6. "Executable Form" means any form of the work other than Source Code Form. . 1.7. "Larger Work" means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software. . 1.8. "License" means this document. . 1.9. "Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently, any and all of the rights conveyed by this License. . 1.10. "Modifications" means any of the following: . (a) any file in Source Code Form that results from an addition to, deletion from, or modification of the contents of Covered Software; or . (b) any new file in Source Code Form that contains any Covered Software. . 1.11. 
"Patent Claims" of a Contributor means any patent claim(s), including without limitation, method, process, and apparatus claims, in any patent Licensable by such Contributor that would be infringed, but for the grant of the License, by the making, using, selling, offering for sale, having made, import, or transfer of either its Contributions or its Contributor Version. . 1.12. "Secondary License" means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses. . 1.13. "Source Code Form" means the form of the work preferred for making modifications. . 1.14. "You" (or "Your") means an individual or a legal entity exercising rights under this License. For legal entities, "You" includes any entity that controls, is controlled by, or is under common control with You. For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. . 2. License Grants and Conditions -------------------------------- . 2.1. Grants . Each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license: . (a) under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its Contributions, either on an unmodified basis, with Modifications, or as part of a Larger Work; and . (b) under Patent Claims of such Contributor to make, use, sell, offer for sale, have made, import, and otherwise transfer either its Contributions or its Contributor Version. . 2.2. Effective Date . The licenses granted in Section 2.1 with respect to any Contribution become effective for each Contribution on the date the Contributor first distributes such Contribution. . 2.3. Limitations on Grant Scope . The licenses granted in this Section 2 are the only rights granted under this License. No additional rights or licenses will be implied from the distribution or licensing of Covered Software under this License. Notwithstanding Section 2.1(b) above, no patent license is granted by a Contributor: . (a) for any code that a Contributor has removed from Covered Software; or . (b) for infringements caused by: (i) Your and any other third party's modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or . (c) under Patent Claims infringed by Covered Software in the absence of its Contributions. . This License does not grant any rights in the trademarks, service marks, or logos of any Contributor (except as may be necessary to comply with the notice requirements in Section 3.4). . 2.4. Subsequent Licenses . No Contributor makes additional grants as a result of Your choice to distribute the Covered Software under a subsequent version of this License (see Section 10.2) or under the terms of a Secondary License (if permitted under the terms of Section 3.3). . 2.5. Representation . Each Contributor represents that the Contributor believes its Contributions are its original creation(s) or it has sufficient rights to grant the rights to its Contributions conveyed by this License. . 2.6. Fair Use . 
This License is not intended to limit any rights You have under applicable copyright doctrines of fair use, fair dealing, or other equivalents. . 2.7. Conditions . Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in Section 2.1. . 3. Responsibilities ------------------- . 3.1. Distribution of Source Form . All distribution of Covered Software in Source Code Form, including any Modifications that You create or to which You contribute, must be under the terms of this License. You must inform recipients that the Source Code Form of the Covered Software is governed by the terms of this License, and how they can obtain a copy of this License. You may not attempt to alter or restrict the recipients' rights in the Source Code Form. . 3.2. Distribution of Executable Form . If You distribute Covered Software in Executable Form then: . (a) such Covered Software must also be made available in Source Code Form, as described in Section 3.1, and You must inform recipients of the Executable Form how they can obtain a copy of such Source Code Form by reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and . (b) You may distribute such Executable Form under the terms of this License, or sublicense it under different terms, provided that the license for the Executable Form does not attempt to limit or alter the recipients' rights in the Source Code Form under this License. . 3.3. Distribution of a Larger Work . You may create and distribute a Larger Work under terms of Your choice, provided that You also comply with the requirements of this License for the Covered Software. If the Larger Work is a combination of Covered Software with a work governed by one or more Secondary Licenses, and the Covered Software is not Incompatible With Secondary Licenses, this License permits You to additionally distribute such Covered Software under the terms of such Secondary License(s), so that the recipient of the Larger Work may, at their option, further distribute the Covered Software under the terms of either this License or such Secondary License(s). . 3.4. Notices . You may not remove or alter the substance of any license notices (including copyright notices, patent notices, disclaimers of warranty, or limitations of liability) contained within the Source Code Form of the Covered Software, except that You may alter any license notices to the extent required to remedy known factual inaccuracies. . 3.5. Application of Additional Terms . You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, You may do so only on Your own behalf, and not on behalf of any Contributor. You must make it absolutely clear that any such warranty, support, indemnity, or liability obligation is offered by You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any jurisdiction. . 4. Inability to Comply Due to Statute or Regulation --------------------------------------------------- . 
If it is impossible for You to comply with any of the terms of this License with respect to some or all of the Covered Software due to statute, judicial order, or regulation then You must: (a) comply with the terms of this License to the maximum extent possible; and (b) describe the limitations and the code they affect. Such description must be placed in a text file included with all distributions of the Covered Software under this License. Except to the extent prohibited by statute or regulation, such description must be sufficiently detailed for a recipient of ordinary skill to be able to understand it. . 5. Termination -------------- . 5.1. The rights granted under this License will terminate automatically if You fail to comply with any of its terms. However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor explicitly and finally terminates Your grants, and (b) on an ongoing basis, if such Contributor fails to notify You of the non-compliance by some reasonable means prior to 60 days after You have come back into compliance. Moreover, Your grants from a particular Contributor are reinstated on an ongoing basis if such Contributor notifies You of the non-compliance by some reasonable means, this is the first time You have received notice of non-compliance with this License from such Contributor, and You become compliant prior to 30 days after Your receipt of the notice. . 5.2. If You initiate litigation against any entity by asserting a patent infringement claim (excluding declaratory judgment actions, counter-claims, and cross-claims) alleging that a Contributor Version directly or indirectly infringes any patent, then the rights granted to You by any and all Contributors for the Covered Software under Section 2.1 of this License shall terminate. . 5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been validly granted by You or Your distributors under this License prior to termination shall survive termination. . ************************************************************************ * * * 6. Disclaimer of Warranty * * ------------------------- * * * * Covered Software is provided under this License on an "as is" * * basis, without warranty of any kind, either expressed, implied, or * * statutory, including, without limitation, warranties that the * * Covered Software is free of defects, merchantable, fit for a * * particular purpose or non-infringing. The entire risk as to the * * quality and performance of the Covered Software is with You. * * Should any Covered Software prove defective in any respect, You * * (not any Contributor) assume the cost of any necessary servicing, * * repair, or correction. This disclaimer of warranty constitutes an * * essential part of this License. No use of any Covered Software is * * authorized under this License except under this disclaimer. * * * ************************************************************************ . ************************************************************************ * * * 7. 
Limitation of Liability * * -------------------------- * * * * Under no circumstances and under no legal theory, whether tort * * (including negligence), contract, or otherwise, shall any * * Contributor, or anyone who distributes Covered Software as * * permitted above, be liable to You for any direct, indirect, * * special, incidental, or consequential damages of any character * * including, without limitation, damages for lost profits, loss of * * goodwill, work stoppage, computer failure or malfunction, or any * * and all other commercial damages or losses, even if such party * * shall have been informed of the possibility of such damages. This * * limitation of liability shall not apply to liability for death or * * personal injury resulting from such party's negligence to the * * extent applicable law prohibits such limitation. Some * * jurisdictions do not allow the exclusion or limitation of * * incidental or consequential damages, so this exclusion and * * limitation may not apply to You. * * * ************************************************************************ . 8. Litigation ------------- . Any litigation relating to this License may be brought only in the courts of a jurisdiction where the defendant maintains its principal place of business and such litigation shall be governed by laws of that jurisdiction, without reference to its conflict-of-law provisions. Nothing in this Section shall prevent a party's ability to bring cross-claims or counter-claims. . 9. Miscellaneous ---------------- . This License represents the complete agreement concerning the subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not be used to construe this License against a Contributor. . 10. Versions of the License --------------------------- . 10.1. New Versions . Mozilla Foundation is the license steward. Except as provided in Section 10.3, no one other than the license steward has the right to modify or publish new versions of this License. Each version will be given a distinguishing version number. . 10.2. Effect of New Versions . You may distribute the Covered Software under the terms of the version of the License under which You originally received the Covered Software, or under the terms of any subsequent version published by the license steward. . 10.3. Modified Versions . If you create software not governed by this License, and you want to create a new license for such software, you may create and use a modified version of this License if you rename the license and remove any references to the name of the license steward (except to note that such modified license differs from this License). . 10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses . If You choose to distribute Source Code Form that is Incompatible With Secondary Licenses under the terms of this version of the License, the notice described in Exhibit B of this License must be attached. . Exhibit A - Source Code Form License Notice ------------------------------------------- . This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. . 
If it is not possible or desirable to put the notice in a particular file, then You may include the notice in a location (such as a LICENSE file in a relevant directory) where a recipient would be likely to look for such a notice. . You may add additional accurate notices of copyright ownership. . Exhibit B - "Incompatible With Secondary Licenses" Notice --------------------------------------------------------- . This Source Code Form is "Incompatible With Secondary Licenses", as defined by the Mozilla Public License, v. 2.0.

RcppEigen/inst/doc/RcppEigen-Introduction.Rnw

\documentclass[shortnames,article,nojss]{jss} \usepackage{booktabs,bm,amsmath,thumbpdf} %\VignetteIndexEntry{RcppEigen-intro} %\VignetteKeywords{linear algebra, template programming, C++, R, Rcpp} %\VignettePackage{RcppEigen} %% VIGNETTE <<>>= pkgVersion <- packageDescription("RcppEigen")$Version pkgDate <- packageDescription("RcppEigen")$Date prettyDate <- format(Sys.Date(), "%B %e, %Y") #require("RcppEigen") #eigenVersion <- paste(unlist(.Call("eigen_version", FALSE)), collapse=".") @ \author{Douglas Bates\\University of Wisconsin-Madison \And Dirk Eddelbuettel\\Debian Project} \Plainauthor{Douglas Bates, Dirk Eddelbuettel} \title{Fast and Elegant Numerical Linear Algebra Using the \pkg{RcppEigen} Package} \Plaintitle{Fast and Elegant Numerical Linear Algebra Using the RcppEigen Package} \Shorttitle{Fast and Elegant Numerical Linear Algebra with \pkg{RcppEigen}} \Abstract{ The \pkg{RcppEigen} package provides access from \proglang{R} \citep{R:Main} to the \pkg{Eigen} \citep*{Eigen:Web} \proglang{C++} template library for numerical linear algebra. \pkg{Rcpp} \citep{CRAN:Rcpp,Eddelbuettel:2013:Rcpp} classes and specializations of the \proglang{C++} templated functions \code{as} and \code{wrap} from \pkg{Rcpp} provide the ``glue'' for passing objects from \proglang{R} to \proglang{C++} and back. Several introductory examples are presented. This is followed by an in-depth discussion of various available approaches for solving least-squares problems, including rank-revealing methods, concluding with an empirical run-time comparison. Last but not least, sparse matrix methods are discussed.
} \Keywords{linear algebra, template programming, \proglang{R}, \proglang{C++}, \pkg{Rcpp}} \Plainkeywords{linear algebra, template programmig, R, C++, Rcpp} \Address{ Douglas Bates \\ Department of Statistics \\ University of Wisconsin-Madison \\ Madison, WI, United States of America \\ E-mail: \email{bates@stat.wisc.edu} \\ URL: \url{http://www.stat.wisc.edu/~bates/}\\ Dirk Eddelbuettel \\ Debian Project \\ River Forest, IL, United States of America\\ E-mail: \email{edd@debian.org}\\ URL: \url{http://dirk.eddelbuettel.com}\\ } \usepackage{Sweave} \newcommand{\argmin}{\operatorname{argmin}\displaylimits} \newcommand{\rank}{\operatorname{rank}} %% highlights macros %% Style definition file generated by highlight 2.7, http://www.andre-simon.de/ % Highlighting theme definition: \newcommand{\hlstd}[1]{\textcolor[rgb]{0,0,0}{#1}} \newcommand{\hlnum}[1]{\textcolor[rgb]{0,0,0}{#1}} \newcommand{\hlopt}[1]{\textcolor[rgb]{0,0,0}{#1}} \newcommand{\hlesc}[1]{\textcolor[rgb]{0.74,0.55,0.55}{#1}} \newcommand{\hlstr}[1]{\textcolor[rgb]{0.90,0.15,0.15}{#1}} \newcommand{\hldstr}[1]{\textcolor[rgb]{0.74,0.55,0.55}{#1}} \newcommand{\hlslc}[1]{\textcolor[rgb]{0.67,0.13,0.13}{\it{#1}}} \newcommand{\hlcom}[1]{\textcolor[rgb]{0.67,0.13,0.13}{\it{#1}}} \newcommand{\hldir}[1]{\textcolor[rgb]{0,0,0}{#1}} \newcommand{\hlsym}[1]{\textcolor[rgb]{0,0,0}{#1}} \newcommand{\hlline}[1]{\textcolor[rgb]{0.33,0.33,0.33}{#1}} \newcommand{\hlkwa}[1]{\textcolor[rgb]{0.61,0.13,0.93}{\bf{#1}}} \newcommand{\hlkwb}[1]{\textcolor[rgb]{0.13,0.54,0.13}{#1}} \newcommand{\hlkwc}[1]{\textcolor[rgb]{0,0,1}{#1}} \newcommand{\hlkwd}[1]{\textcolor[rgb]{0,0,0}{#1}} \definecolor{bgcolor}{rgb}{1,1,1} % ------------------------------------------------------------------------ \begin{document} \SweaveOpts{engine=R,eps=FALSE} \begin{quote} \footnotesize This vignette corresponds to a \href{http://www.jstatsoft.org/v52/i05/}{paper published} in the \textsl{Journal of Statistical Software}. Currently still identical to the paper, this vignette version may over time receive minor updates. For citations, please use \citet{JSS:RcppEigen} as provided by \code{citation("RcppEigen")}. This version corresponds to \pkg{RcppEigen} version \Sexpr{pkgVersion} and was typeset on \Sexpr{prettyDate}. \end{quote} \section{Introduction} \label{sec:intro} Linear algebra is an essential building block of statistical computing. Operations such as matrix decompositions, linear program solvers, and eigenvalue/eigenvector computations are used in many estimation and analysis routines. As such, libraries supporting linear algebra have long been provided by statistical programmers for different programming languages and environments. Because it is object-oriented, \proglang{C++}, one of the central modern languages for numerical and statistical computing, is particularly effective at representing matrices, vectors and decompositions, and numerous class libraries providing linear algebra routines have been written over the years. As both the \proglang{C++} language and standards have evolved \citep{Meyers:2005:EffectiveC++,Meyers:1995:MoreEffectiveC++,Cpp11}, so have the compilers implementing the language. Relatively modern language constructs such as template meta-programming are particularly useful because they provide overloading of operations (allowing expressive code in the compiled language similar to what can be done in scripting languages) and can shift some of the computational burden from the run-time to the compile-time. 
(A more detailed discussion of template meta-programming is, however, beyond the scope of this paper). \cite{Veldhuizen:1998:Blitz} provided an early and influential implementation of numerical linear algebra classes that already demonstrated key features of this approach. Its usage was held back at the time by the limited availability of compilers implementing all the necessary features of the \proglang{C++} language. This situation has greatly improved over the last decade, and many more libraries have been made available. One such \proglang{C++} library is \pkg{Eigen} by \citet*{Eigen:Web} which started as a sub-project to KDE (a popular Linux desktop environment), initially focussing on fixed-size matrices to represent projections in a visualization application. \pkg{Eigen} grew from there and has over the course of about a decade produced three major releases with \pkg{Eigen}3 being the current major version. To check the minor and patch version numbers, load the \pkg{RcppEigen} package and call \begin{CodeInput} R> .Call("eigen_version", FALSE) \end{CodeInput} \begin{CodeOutput} major minor patch 3 1 1 \end{CodeOutput} \pkg{Eigen} is of interest as the \proglang{R} system for statistical computation and graphics \citep{R:Main} is itself easily extensible. This is particular true via the \proglang{C} language that most of \proglang{R}'s compiled core parts are written in, but also for the \proglang{C++} language which can interface with \proglang{C}-based systems rather easily. The manual ``Writing \proglang{R} Extensions'' \citep{R:Extensions} is the basic reference for extending \proglang{R} with either \proglang{C} or \proglang{C++}. The \pkg{Rcpp} package by \citet{JSS:Rcpp,CRAN:Rcpp} facilitates extending \proglang{R} with \proglang{C++} code by providing seamless object mapping between both languages. % As stated in the \pkg{Rcpp} \citep{CRAN:Rcpp} vignette, ``Extending \pkg{Rcpp}'' as well as in \citet{Eddelbuettel:2013:Rcpp} \begin{quote} \pkg{Rcpp} facilitates data interchange between \proglang{R} and \proglang{C++} through the templated functions \code{Rcpp::as} (for conversion of objects from \proglang{R} to \proglang{C++}) and \code{Rcpp::wrap} (for conversion from \proglang{C++} to \proglang{R}). \end{quote} The \pkg{RcppEigen} package provides the header files composing the \pkg{Eigen} \proglang{C++} template library, along with implementations of \code{Rcpp::as} and \code{Rcpp::wrap} for the \proglang{C++} classes defined in \pkg{Eigen}. The \pkg{Eigen} classes themselves provide high-performance, versatile and comprehensive representations of dense and sparse matrices and vectors, as well as decompositions and other functions to be applied to these objects. The next section introduces some of these classes and shows how to interface to them from \proglang{R}. \section[Eigen classes]{\pkg{Eigen} classes} \label{sec:eclasses} \pkg{Eigen} is a \proglang{C++} template library providing classes for many forms of matrices, vectors, arrays and decompositions. These classes are flexible and comprehensive allowing for both high performance and well structured code representing high-level operations. \proglang{C++} code based on \pkg{Eigen} is often more like \proglang{R} code, working on the ``whole object'', than like compiled code in other languages where operations often must be coded in loops. As in many \proglang{C++} template libraries using template meta-programming \citep{Abrahams+Gurtovoy:2004:TemplateMetaprogramming}, the templates themselves can be very complicated. 
However, \pkg{Eigen} provides \code{typedef}s for common classes that correspond to \proglang{R} matrices and vectors, as shown in Table~\ref{tab:REigen}, and this paper will use these \code{typedef}s. \begin{table}[t!] \centering \begin{tabular}{l l} \hline \multicolumn{1}{c}{\proglang{R} object type} & \multicolumn{1}{c}{\pkg{Eigen} class typedef}\\ \hline numeric matrix & \code{MatrixXd}\\ integer matrix & \code{MatrixXi}\\ complex matrix & \code{MatrixXcd}\\ numeric vector & \code{VectorXd}\\ integer vector & \code{VectorXi}\\ complex vector & \code{VectorXcd}\\ \code{Matrix::dgCMatrix} \phantom{XXX} & \code{SparseMatrix}\\ \hline \end{tabular} \caption{Correspondence between \proglang{R} matrix and vector types and classes in the \pkg{Eigen} namespace. \label{tab:REigen}} \end{table} Here, \code{Vector} and \code{Matrix} describe the dimension of the object. The \code{X} signals that these are dynamically-sized objects (as opposed to fixed-size matrices such as $3 \times 3$ matrices also available in \pkg{Eigen}). Lastly, the trailing characters \code{i}, \code{d} and \code{cd} denote, respectively, storage types \code{integer}, \code{double} and \code{complex double}. The \proglang{C++} classes shown in Table~\ref{tab:REigen} are in the \pkg{Eigen} namespace, which means that they must be written as \code{Eigen::MatrixXd}. However, if one prefaces the use of these class names with a declaration like \begin{quote} \noindent \ttfamily \hlstd{}\hlkwa{using\ }\hlstd{Eigen}\hlopt{::}\hlstd{MatrixXd}\hlopt{;}\hlstd{}\hspace*{\fill}\\ \mbox{} \normalfont \normalsize \end{quote} \vspace*{-0.4cm} then one can use these names without the namespace qualifier. \subsection[Mapped matrices in Eigen]{Mapped matrices in \pkg{Eigen}} \label{sec:mapped} Storage for the contents of matrices from the classes shown in Table~\ref{tab:REigen} is allocated and controlled by the class constructors and destructors. Creating an instance of such a class from an \proglang{R} object involves copying its contents. An alternative is to have the contents of the \proglang{R} matrix or vector mapped to the contents of the object from the \pkg{Eigen} class. For dense matrices one can use the \pkg{Eigen} templated class \code{Map}, and for sparse matrices one can deploy the \pkg{Eigen} templated class \code{MappedSparseMatrix}. One must, of course, be careful not to modify the contents of the \proglang{R} object in the \proglang{C++} code. A recommended practice is always to declare mapped objects as {\ttfamily\hlkwb{const}\normalfont}. \subsection[Arrays in Eigen]{Arrays in \pkg{Eigen}} \label{sec:arrays} For matrix and vector classes \pkg{Eigen} overloads the \code{`*'} operator as matrix multiplication. Occasionally component-wise operations instead of matrix operations are desired, for which the \code{Array} templated classes are used in \pkg{Eigen}. Switching back and forth between matrices or vectors and arrays is usually done via the \code{array()} method for matrix or vector objects and the \code{matrix()} method for arrays. \subsection[Structured matrices in Eigen]{Structured matrices in \pkg{Eigen}} \label{sec:structured} \pkg{Eigen} provides classes for matrices with special structure such as symmetric matrices, triangular matrices and banded matrices. For dense matrices, these special structures are described as ``views'', meaning that the full dense matrix is stored but only part of the matrix is used in operations. 
For a symmetric matrix one must specify whether the lower triangle or the upper triangle is to be used as the contents, with the other triangle defined by the implicit symmetry. \section{Some simple examples} \label{sec:simple} \proglang{C++} functions to perform simple operations on matrices or vectors can follow a pattern of: \begin{enumerate} \item Map the \proglang{R} objects passed as arguments into \pkg{Eigen} objects. \item Create the result. \item Return \code{Rcpp::wrap} applied to the result. \end{enumerate} An idiom for the first step is % using Eigen::Map; % using Eigen::MatrixXd; % using Rcpp::as; % % const Map A(as >(AA)); %\end{lstlisting} \begin{quote} \noindent \ttfamily \hlstd{}\hlkwa{using\ }\hlstd{Eigen}\hlsym{::}\hlstd{Map}\hlsym{;}\hspace*{\fill}\\ \hlstd{}\hlkwa{using\ }\hlstd{Eigen}\hlsym{::}\hlstd{MatrixXd}\hlsym{;}\hspace*{\fill}\\ \hlstd{}\hlkwa{using\ }\hlstd{Rcpp}\hlsym{::}\hlstd{as}\hlsym{;}\hspace*{\fill}\\ \hlstd{}\hspace*{\fill}\\ \hlkwb{const\ }\hlstd{Map}\hlsym{$<$}\hlstd{MatrixXd}\hlsym{$>$}\hlstd{\ \ }\hlsym{}\hlstd{}\hlkwd{A}\hlstd{}\hlsym{(}\hlstd{as}\hlsym{$<$}\hlstd{Map}\hlsym{$<$}\hlstd{MatrixXd}\hlsym{$>$\ $>$(}\hlstd{AA}\hlsym{));}\hlstd{}\hspace*{\fill}\\ \mbox{} \normalfont \end{quote} \vspace*{-0.4cm} where \code{AA} is the name of the \proglang{R} object (of type \code{SEXP} in \proglang{C} and \proglang{C++}) passed to the \proglang{C++} function. An alternative to the \code{using} declarations is to declare a \code{typedef} as in % typedef Eigen::Map MapMatd; % const MapMatd A(Rcpp::as(AA)); \begin{quote} \noindent \ttfamily \hlstd{}\hlkwc{typedef\ }\hlstd{Eigen}\hlopt{::}\hlstd{Map}\hlopt{$<$}\hlstd{Eigen}\hlopt{::}\hlstd{MatrixXd}\hlopt{$>$}\hlstd{\ \ \ }\hlopt{}\hlstd{MapMatd}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{MapMatd}\hlstd{\ \ \ \ \ \ \ \ \ \ }\hlstd{}\hlkwd{A}\hlstd{}\hlopt{(}\hlstd{Rcpp}\hlopt{::}\hlstd{as}\hlopt{$<$}\hlstd{MapMatd}\hlopt{$>$(}\hlstd{AA}\hlopt{));}\hlstd{}\hspace*{\fill}\\ \mbox{} \normalfont \normalsize \end{quote} \vspace*{-0.4cm} The \code{cxxfunction} function from the \pkg{inline} package \citep*{CRAN:inline} for \proglang{R} and its \pkg{RcppEigen} plugin provide a convenient method of developing and debugging the \proglang{C++} code. For actual production code one generally incorporates the \proglang{C++} source code files in a package and includes the line \code{LinkingTo: Rcpp, RcppEigen} in the package's \code{DESCRIPTION} file. The \code{RcppEigen.package.skeleton} function provides a quick way of generating the skeleton of a package that will use \pkg{RcppEigen}. The \code{cxxfunction} with the \code{"Rcpp"} or \code{"RcppEigen"} plugins has the \code{as} and \code{wrap} functions already defined as \code{Rcpp::as} and \code{Rcpp::wrap}. In the examples below these declarations are omitted. It is important to remember that they are needed in actual \proglang{C++} source code for a package. The first few examples are simply for illustration as the operations shown could be more effectively performed directly in \proglang{R}. Finally, the results from \pkg{Eigen} are compared to those from the direct \proglang{R} results. 
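As a minimal sketch of this three-step pattern (not taken from the package itself, and assuming the function body is compiled via \code{cxxfunction} with the \code{"RcppEigen"} plugin so that \code{as} and \code{wrap} are in scope, with the matrix passed through the \code{SEXP} argument \code{AA}), one could map an \proglang{R} numeric matrix and return the sum of its elements:

\begin{verbatim}
using Eigen::Map;
using Eigen::MatrixXd;

// step 1: map the R matrix AA into an Eigen object (no copy is made)
const Map<MatrixXd>  A(as<Map<MatrixXd> >(AA));
// step 2: create the result
const double         total(A.sum());
// step 3: return it to R
return wrap(total);
\end{verbatim}
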
\subsection{Transpose of an integer matrix} \label{sec:transpose} The next \proglang{R} code snippet creates a simple matrix of integers \begin{CodeInput} R> (A <- matrix(1:6, ncol = 2)) \end{CodeInput} \begin{CodeOutput} [,1] [,2] [1,] 1 4 [2,] 2 5 [3,] 3 6 \end{CodeOutput} \begin{CodeInput} R> str(A) \end{CodeInput} \begin{CodeOutput} int [1:3, 1:2] 1 2 3 4 5 6 \end{CodeOutput} and, in Figure~\ref{trans}, the \code{transpose()} method for the \code{Eigen::MatrixXi} class is used to return the transpose of the supplied matrix. The \proglang{R} matrix in the \code{SEXP} \code{AA} is first mapped to an \code{Eigen::MatrixXi} object, and then the matrix \code{At} is constructed from its transpose and returned to \proglang{R}. \begin{figure}[t!] \hrule \smallskip \noindent \ttfamily \hlstd{}\hlkwa{using\ }\hlstd{Eigen}\hlsym{::}\hlstd{Map}\hlsym{;}\hspace*{\fill}\\ \hlstd{}\hlkwa{using\ }\hlstd{Eigen}\hlsym{::}\hlstd{MatrixXi}\hlsym{;}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{}\hlslc{//\ Map\ the\ integer\ matrix\ AA\ from\ R}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{Map}\hlsym{$<$}\hlstd{MatrixXi}\hlsym{$>$}\hlstd{\ \ }\hlsym{}\hlstd{}\hlkwd{A}\hlstd{}\hlsym{(}\hlstd{as}\hlsym{$<$}\hlstd{Map}\hlsym{$<$}\hlstd{MatrixXi}\hlsym{$>$\ $>$(}\hlstd{AA}\hlsym{));}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{}\hlslc{//\ evaluate\ and\ return\ the\ transpose\ of\ A}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{MatrixXi}\hlstd{\ \ \ \ \ \ }\hlstd{}\hlkwd{At}\hlstd{}\hlsym{(}\hlstd{A}\hlsym{.}\hlstd{}\hlkwd{transpose}\hlstd{}\hlsym{());}\hspace*{\fill}\\ \hlstd{}\hlkwa{return\ }\hlstd{}\hlkwd{wrap}\hlstd{}\hlsym{(}\hlstd{At}\hlsym{);}\hlstd{}\hspace*{\fill} \normalfont \hrule \caption{\code{transCpp}: Transpose a matrix of integers. \label{trans}} \end{figure} The \proglang{R} snippet below compiles and links the \proglang{C++} code segment. The actual work is done by the function \code{cxxfunction} from the \pkg{inline} package which compiles, links and loads code written in \proglang{C++} and supplied as a character variable. This frees the user from having to know about compiler and linker details and options, which makes ``exploratory programming'' much easier. Here the program piece to be compiled is stored as a character variable named \code{transCpp}, and \code{cxxfunction} creates an executable function which is assigned to \code{ftrans}. This new function is used on the matrix $\bm A$ shown above, and one can check that it works as intended by comparing the output to an explicit transpose of the matrix argument. \begin{CodeInput} R> ftrans <- cxxfunction(signature(AA = "matrix"), transCpp, + plugin = "RcppEigen") R> (At <- ftrans(A)) \end{CodeInput} \begin{CodeOutput} [,1] [,2] [,3] [1,] 1 2 3 [2,] 4 5 6 \end{CodeOutput} \begin{CodeInput} R> stopifnot(all.equal(At, t(A))) \end{CodeInput} For numeric or integer matrices the \code{adjoint()} method is equivalent to the \code{transpose()} method. For complex matrices, the adjoint is the conjugate of the transpose. In keeping with the conventions in the \pkg{Eigen} documentation the \code{adjoint()} method is used in what follows to create the transpose of numeric or integer matrices. \subsection{Products and cross-products} \label{sec:products} As mentioned in Section~\ref{sec:arrays}, the \code{`*'} operator is overloaded as matrix multiplication for the various matrix and vector classes in \pkg{Eigen}. 
The \proglang{C++} code in Figure~\ref{prod} produces a list containing both the product and cross-product (in the sense of the \proglang{R} function call \code{crossproduct(A, B)} evaluating $\bm A^\top\bm B$) of its two arguments % \begin{figure}[t!] \hrule \smallskip \noindent \ttfamily \hlstd{}\hlkwc{typedef\ }\hlstd{Eigen}\hlopt{::}\hlstd{Map}\hlopt{$<$}\hlstd{Eigen}\hlopt{::}\hlstd{MatrixXi}\hlopt{$>$}\hlstd{\ \ \ }\hlopt{}\hlstd{MapMati}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{MapMati}\hlstd{\ \ \ \ }\hlstd{}\hlkwd{B}\hlstd{}\hlopt{(}\hlstd{as}\hlopt{$<$}\hlstd{MapMati}\hlopt{$>$(}\hlstd{BB}\hlopt{));}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{MapMati}\hlstd{\ \ \ \ }\hlstd{}\hlkwd{C}\hlstd{}\hlopt{(}\hlstd{as}\hlopt{$<$}\hlstd{MapMati}\hlopt{$>$(}\hlstd{CC}\hlopt{));}\hspace*{\fill}\\ \hlstd{}\hlkwa{return\ }\hlstd{List}\hlopt{::}\hlstd{}\hlkwd{create}\hlstd{}\hlopt{(}\hlstd{Named}\hlopt{{(}}\hlstd{}\hlstr{"B\ \%{*}\%\ C"}\hlstd{}\hlopt{{)}}\hlstd{\ \ \ \ \ \ \ \ \ }\hlopt{=\ }\hlstd{B\ }\hlopt{{*}\ }\hlstd{C}\hlopt{,}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{Named}\hlopt{{(}}\hlstd{}\hlstr{"crossprod(B,\ C)"}\hlstd{}\hlopt{{)}\ =\ }\hlstd{B}\hlopt{.}\hlstd{}\hlkwd{adjoint}\hlstd{}\hlopt{()\ {*}\ }\hlstd{C}\hlopt{);}\hlstd{}\hspace*{\fill}\\ \mbox{} \normalfont \normalsize \hrule \caption{\code{prodCpp}: Product and cross-product of two matrices. \label{prod}} \end{figure} % \begin{CodeInput} R> fprod <- cxxfunction(signature(BB = "matrix", CC = "matrix"), + prodCpp, "RcppEigen") R> B <- matrix(1:4, ncol = 2) R> C <- matrix(6:1, nrow = 2) R> str(fp <- fprod(B, C)) \end{CodeInput} \begin{CodeOutput} List of 2 $ B %*% C : int [1:2, 1:3] 21 32 13 20 5 8 $ crossprod(B, C): int [1:2, 1:3] 16 38 10 24 4 10 \end{CodeOutput} \begin{CodeInput} R> stopifnot(all.equal(fp[[1]], B %*% C), all.equal(fp[[2]], crossprod(B, C))) \end{CodeInput} % Note that the \code{create} class method for \code{Rcpp::List} implicitly applies \code{Rcpp::wrap} to its arguments. \subsection{Crossproduct of a single matrix} \label{sec:crossproduct} As shown in the last example, the \proglang{R} function \code{crossprod} calculates the product of the transpose of its first argument with its second argument. The single argument form, \code{crossprod(X)}, evaluates $\bm X^\top\bm X$. One could, of course, calculate this product as \begin{verbatim} t(X) %*% X \end{verbatim} but \code{crossprod(X)} is roughly twice as fast because the result is known to be symmetric and only one triangle needs to be calculated. The function \code{tcrossprod} evaluates \code{crossprod(t(X))} without actually forming the transpose. To express these calculations in \pkg{Eigen}, a \code{SelfAdjointView}---which is a dense matrix of which only one triangle is used, the other triangle being inferred from the symmetry---is created. (The characterization ``self-adjoint'' is equivalent to symmetric for non-complex matrices.) The \pkg{Eigen} class name is \code{SelfAdjointView}. The method for general matrices that produces such a view is called \code{selfadjointView}. Both require specification of either the \code{Lower} or \code{Upper} triangle. For triangular matrices the class is \code{TriangularView} and the method is \code{triangularView}. The triangle can be specified as \code{Lower}, \code{UnitLower}, \code{StrictlyLower}, \code{Upper}, \code{UnitUpper} or \code{StrictlyUpper}. 
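As a small illustrative sketch (not part of the package's examples), given a dense numeric matrix \code{A} of class \code{MatrixXd}, a dense copy of its upper triangle and a dense symmetric matrix built from its lower triangle could be formed as

\begin{verbatim}
using Eigen::MatrixXd;
using Eigen::Upper;
using Eigen::Lower;

// dense copy with the entries below the diagonal set to zero
MatrixXd U = A.triangularView<Upper>();
// dense symmetric matrix; the upper triangle is filled in from the lower
MatrixXd S = A.selfadjointView<Lower>();
\end{verbatim}
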
For self-adjoint views the \code{rankUpdate} method adds a scalar multiple of $\bm A\bm A^\top$ to the current symmetric matrix. The scalar multiple defaults to 1. The code in Figure~\ref{crossprod} produces both $\bm A^\top\bm A$ and $\bm A\bm A^\top$ from a matrix $\bm A$. \begin{figure}[t!] \hrule \smallskip \noindent \ttfamily \hlstd{}\hlkwa{using\ }\hlstd{Eigen}\hlopt{::}\hlstd{Map}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwa{using\ }\hlstd{Eigen}\hlopt{::}\hlstd{MatrixXi}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwa{using\ }\hlstd{Eigen}\hlopt{::}\hlstd{Lower}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hspace*{\fill}\\ \hlkwb{const\ }\hlstd{Map}\hlopt{$<$}\hlstd{MatrixXi}\hlopt{$>$\ }\hlstd{}\hlkwd{A}\hlstd{}\hlopt{(}\hlstd{as}\hlopt{$<$}\hlstd{Map}\hlopt{$<$}\hlstd{MatrixXi}\hlopt{$>$\ $>$(}\hlstd{AA}\hlopt{));}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ int}\hlstd{\ \ \ \ \ \ \ \ \ \ \ }\hlkwb{}\hlstd{}\hlkwd{m}\hlstd{}\hlopt{(}\hlstd{A}\hlopt{.}\hlstd{}\hlkwd{rows}\hlstd{}\hlopt{()),\ }\hlstd{}\hlkwd{n}\hlstd{}\hlopt{(}\hlstd{A}\hlopt{.}\hlstd{}\hlkwd{cols}\hlstd{}\hlopt{());}\hspace*{\fill}\\ \hlstd{MatrixXi}\hlstd{\ \ \ \ \ \ \ \ \ \ }\hlstd{}\hlkwd{AtA}\hlstd{}\hlopt{(}\hlstd{}\hlkwd{MatrixXi}\hlstd{}\hlopt{(}\hlstd{n}\hlopt{,\ }\hlstd{n}\hlopt{).}\hlstd{}\hlkwd{setZero}\hlstd{}\hlopt{().}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{selfadjointView}\hlopt{$<$}\hlstd{Lower}\hlopt{$>$().}\hlstd{}\hlkwd{rankUpdate}\hlstd{}\hlopt{(}\hlstd{A}\hlopt{.}\hlstd{}\hlkwd{adjoint}\hlstd{}\hlopt{()));}\hspace*{\fill}\\ \hlstd{MatrixXi}\hlstd{\ \ \ \ \ \ \ \ \ \ }\hlstd{}\hlkwd{AAt}\hlstd{}\hlopt{(}\hlstd{}\hlkwd{MatrixXi}\hlstd{}\hlopt{(}\hlstd{m}\hlopt{,\ }\hlstd{m}\hlopt{).}\hlstd{}\hlkwd{setZero}\hlstd{}\hlopt{().}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{selfadjointView}\hlopt{$<$}\hlstd{Lower}\hlopt{$>$().}\hlstd{}\hlkwd{rankUpdate}\hlstd{}\hlopt{(}\hlstd{A}\hlopt{));}\hspace*{\fill}\\ \hlstd{}\hspace*{\fill}\\ \hlkwa{return\ }\hlstd{List}\hlopt{::}\hlstd{}\hlkwd{create}\hlstd{}\hlopt{(}\hlstd{Named}\hlopt{{(}}\hlstd{}\hlstr{"crossprod(A)"}\hlstd{}\hlopt{{)}}\hlstd{\ \ }\hlopt{=\ }\hlstd{AtA}\hlopt{,}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{Named}\hlopt{{(}}\hlstd{}\hlstr{"tcrossprod(A)"}\hlstd{}\hlopt{{)}\ =\ }\hlstd{AAt}\hlopt{);}\hlstd{}\hspace*{\fill} \normalfont \normalsize \hrule \caption{\code{crossprodCpp}: Cross-product and transposed cross-product of a single matrix. \label{crossprod}} \end{figure} \begin{CodeInput} R> fcprd <- cxxfunction(signature(AA = "matrix"), crossprodCpp, "RcppEigen") R> str(crp <- fcprd(A)) \end{CodeInput} \begin{CodeOutput} List of 2 $ crossprod(A) : int [1:2, 1:2] 14 32 32 77 $ tcrossprod(A): int [1:3, 1:3] 17 22 27 22 29 36 27 36 45 \end{CodeOutput} \begin{CodeInput} R> stopifnot(all.equal(crp[[1]], crossprod(A)), + all.equal(crp[[2]], tcrossprod(A))) \end{CodeInput} To some, the expressions in Figure~\ref{crossprod} to construct \code{AtA} and \code{AAt} are compact and elegant. To others they are hopelessly confusing. If you find yourself in the latter group, you just need to read the expression left to right. 
So, for example, we construct \code{AAt} by creating a general integer matrix of size $m\times m$ (where $\bm A$ is $m\times n$), ensuring that all its elements are zero, regarding it as a self-adjoint (i.e., symmetric) matrix using the elements in the lower triangle, then adding $\bm A\bm A^\top$ to it and converting back to a general matrix form (i.e., the strict lower triangle is copied into the strict upper triangle). In more detail: \begin{enumerate} \item \code{MatrixXi(n, n)} creates an $n\times n$ integer matrix with arbitrary contents \item \code{.setZero()} zeros out the contents of the matrix \item \code{.selfAdjointView()} causes what follows to treat the matrix as a symmetric matrix in which only the lower triangle is used, the strict upper triangle being inferred by symmetry \item \code{.rankUpdate(A)} forms the sum $\bm B+\bm A\bm A^\top$ where $\bm B$ is the symmetric matrix of zeros created in the previous steps. \end{enumerate} The assignment of this symmetric matrix to the (general) \code{MatrixXi} object \code{AAt} causes the result to be symmetrized during the assignment. For these products one could define the symmetric matrix from either the lower triangle or the upper triangle as the result will be symmetrized before it is returned. To cut down on repetition of \code{using} statements we gather them in a character variable, \code{incl}, that will be given as the \code{includes} argument in the calls to \code{cxxfunction}. We also define a utility function, \code{AtA}, that returns the crossproduct matrix as shown in Figure~\ref{fig:incl} \begin{figure}[t!] \hrule \smallskip \noindent \ttfamily \hlstd{}\hlkwa{using}\hlstd{\ \ \ }\hlkwa{}\hlstd{Eigen}\hlopt{::}\hlstd{LLT}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwa{using}\hlstd{\ \ \ }\hlkwa{}\hlstd{Eigen}\hlopt{::}\hlstd{Lower}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwa{using}\hlstd{\ \ \ }\hlkwa{}\hlstd{Eigen}\hlopt{::}\hlstd{Map}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwa{using}\hlstd{\ \ \ }\hlkwa{}\hlstd{Eigen}\hlopt{::}\hlstd{MatrixXd}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwa{using}\hlstd{\ \ \ }\hlkwa{}\hlstd{Eigen}\hlopt{::}\hlstd{MatrixXi}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwa{using}\hlstd{\ \ \ }\hlkwa{}\hlstd{Eigen}\hlopt{::}\hlstd{Upper}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwa{using}\hlstd{\ \ \ }\hlkwa{}\hlstd{Eigen}\hlopt{::}\hlstd{VectorXd}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwc{typedef\ }\hlstd{Map}\hlopt{$<$}\hlstd{MatrixXd}\hlopt{$>$}\hlstd{\ \ }\hlopt{}\hlstd{MapMatd}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwc{typedef\ }\hlstd{Map}\hlopt{$<$}\hlstd{MatrixXi}\hlopt{$>$}\hlstd{\ \ }\hlopt{}\hlstd{MapMati}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwc{typedef\ }\hlstd{Map}\hlopt{$<$}\hlstd{VectorXd}\hlopt{$>$}\hlstd{\ \ }\hlopt{}\hlstd{MapVecd}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwc{inline\ }\hlstd{MatrixXd\ }\hlkwd{AtA}\hlstd{}\hlopt{(}\hlstd{}\hlkwb{const\ }\hlstd{MapMatd}\hlopt{\&\ }\hlstd{A}\hlopt{)\ \{}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{}\hlkwb{int}\hlstd{\ \ \ \ }\hlkwb{}\hlstd{}\hlkwd{n}\hlstd{}\hlopt{(}\hlstd{A}\hlopt{.}\hlstd{}\hlkwd{cols}\hlstd{}\hlopt{());}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{}\hlkwa{return}\hlstd{\ \ \ }\hlkwa{}\hlstd{}\hlkwd{MatrixXd}\hlstd{}\hlopt{(}\hlstd{n}\hlopt{,}\hlstd{n}\hlopt{).}\hlstd{}\hlkwd{setZero}\hlstd{}\hlopt{().}\hlstd{selfadjointView}\hlopt{$<$}\hlstd{Lower}\hlopt{$>$()}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ 
}\hlstd{}\hlopt{.}\hlstd{}\hlkwd{rankUpdate}\hlstd{}\hlopt{(}\hlstd{A}\hlopt{.}\hlstd{}\hlkwd{adjoint}\hlstd{}\hlopt{());}\hspace*{\fill}\\ \hlstd{}\hlopt{\}}\hlstd{}\hspace*{\fill}\\ \mbox{} \normalfont \normalsize \hrule \caption{The contents of the character vector, \code{incl}, that will preface \proglang{C++} code segments that follow. \label{fig:incl}} \end{figure} \subsection{Cholesky decomposition of the crossprod} \label{sec:chol} The Cholesky decomposition of the positive-definite, symmetric matrix, $\bm A$, can be written in several forms. Numerical analysts define the ``LLt'' form as the lower triangular matrix, $\bm L$, such that $\bm A=\bm L\bm L^\top$ and the ``LDLt'' form as a unit lower triangular matrix $\bm L$ and a diagonal matrix $\bm D$ with positive diagonal elements such that $\bm A=\bm L\bm D\bm L^\top$. Statisticians often write the decomposition as $\bm A=\bm R^\top\bm R$ where $\bm R$ is an upper triangular matrix. Of course, this $\bm R$ is simply the transpose of $\bm L$ from the ``LLt'' form. The templated \pkg{Eigen} classes for the LLt and LDLt forms are called \code{LLT} and \code{LDLT} and the corresponding methods are \code{.llt()} and \code{.ldlt()}. Because the Cholesky decomposition involves taking square roots, we pass a numeric matrix, $\bm A$, not an integer matrix. % \begin{figure}[t!] \hrule \smallskip \noindent \ttfamily \hlstd{}\hlkwb{const}\hlstd{\ \ }\hlkwb{}\hlstd{LLT}\hlopt{$<$}\hlstd{MatrixXd}\hlopt{$>$\ }\hlstd{}\hlkwd{llt}\hlstd{}\hlopt{(}\hlstd{}\hlkwd{AtA}\hlstd{}\hlopt{(}\hlstd{as}\hlopt{$<$}\hlstd{MapMatd}\hlopt{$>$(}\hlstd{AA}\hlopt{)));}\hspace*{\fill}\\ \hlstd{}\hlkwa{return\ }\hlstd{List}\hlopt{::}\hlstd{}\hlkwd{create}\hlstd{}\hlopt{(}\hlstd{Named}\hlopt{{(}}\hlstd{}\hlstr{"L"}\hlstd{}\hlopt{{)}\ =\ }\hlstd{}\hlkwd{MatrixXd}\hlstd{}\hlopt{(}\hlstd{llt}\hlopt{.}\hlstd{}\hlkwd{matrixL}\hlstd{}\hlopt{()),}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{Named}\hlopt{{(}}\hlstd{}\hlstr{"R"}\hlstd{}\hlopt{{)}\ =\ }\hlstd{}\hlkwd{MatrixXd}\hlstd{}\hlopt{(}\hlstd{llt}\hlopt{.}\hlstd{}\hlkwd{matrixU}\hlstd{}\hlopt{()));}\hlstd{}\hspace*{\fill}\\ \mbox{} \normalfont \normalsize \hrule \caption{\code{cholCpp}: Cholesky decomposition of a cross-product. \label{chol}} \end{figure} % \begin{CodeInput} R> storage.mode(A) <- "double" R> fchol <- cxxfunction(signature(AA = "matrix"), cholCpp, "RcppEigen", incl) R> (ll <- fchol(A)) \end{CodeInput} \begin{CodeOutput} $L [,1] [,2] [1,] 3.74166 0.00000 [2,] 8.55236 1.96396 $R [,1] [,2] [1,] 3.74166 8.55236 [2,] 0.00000 1.96396 \end{CodeOutput} \begin{CodeInput} R> stopifnot(all.equal(ll[[2]], chol(crossprod(A)))) \end{CodeInput} \subsection{Determinant of the cross-product matrix} \label{sec:determinant} The ``D-optimal'' criterion for experimental design chooses the design that maximizes the determinant, $|\bm X^\top\bm X|$, for the $n\times p$ model matrix (or Jacobian matrix), $\bm X$. The determinant, $|\bm L|$, of the $p\times p$ lower Cholesky factor $\bm L$, defined so that $\bm L\bm L^\top=\bm X^\top\bm X$, is the product of its diagonal elements, as is the case for any triangular matrix. By the properties of determinants, \begin{displaymath} |\bm X^\top\bm X|=|\bm L\bm L^\top|=|\bm L|\,|\bm L^\top|=|\bm L|^2 \end{displaymath} Alternatively, if using the ``LDLt'' decomposition, $\bm L\bm D\bm L^\top=\bm X^\top\bm X$ where $\bm L$ is unit lower triangular and $\bm D$ is diagonal then $|\bm X^\top\bm X|$ is the product of the diagonal elements of $\bm D$. 
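As a concrete check, for the matrix $\bm A$ used in the previous examples,
\begin{displaymath}
\bm A^\top\bm A=
\begin{bmatrix}
14 & 32\\
32 & 77
\end{bmatrix},\qquad
\bm D=\mathrm{diag}\left(14,\;54/14\right),\qquad
|\bm A^\top\bm A|=14\cdot\frac{54}{14}=54 ,
\end{displaymath}
which agrees with the ``LLt'' form: the diagonal of $\bm L$ shown above is $(3.74166,\;1.96396)$ and $(3.74166\times1.96396)^2\approx54$.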
Because it is known that the diagonal elements of $\bm D$ must be non-negative, one often evaluates the logarithm of the determinant as the sum of the logarithms of the diagonal elements of $\bm D$. Several options are shown in Figure~\ref{cholDet}. % \begin{figure}[t!] \hrule \smallskip \noindent \ttfamily \hlstd{}\hlkwb{const\ }\hlstd{MatrixXd}\hlstd{\ \ }\hlstd{}\hlkwd{ata}\hlstd{}\hlopt{(}\hlstd{}\hlkwd{AtA}\hlstd{}\hlopt{(}\hlstd{as}\hlopt{$<$}\hlstd{MapMatd}\hlopt{$>$(}\hlstd{AA}\hlopt{)));}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ double}\hlstd{\ \ \ }\hlkwb{}\hlstd{}\hlkwd{detL}\hlstd{}\hlopt{(}\hlstd{}\hlkwd{MatrixXd}\hlstd{}\hlopt{(}\hlstd{ata}\hlopt{.}\hlstd{}\hlkwd{llt}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{matrixL}\hlstd{}\hlopt{()).}\hlstd{}\hlkwd{diagonal}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{prod}\hlstd{}\hlopt{());}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{VectorXd\ }\hlkwd{Dvec}\hlstd{}\hlopt{(}\hlstd{ata}\hlopt{.}\hlstd{}\hlkwd{ldlt}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{vectorD}\hlstd{}\hlopt{());}\hspace*{\fill}\\ \hlstd{}\hlkwa{return\ }\hlstd{List}\hlopt{::}\hlstd{}\hlkwd{create}\hlstd{}\hlopt{(}\hlstd{Named}\hlopt{{(}}\hlstd{}\hlstr{"d1"}\hlstd{}\hlopt{{)}\ =\ }\hlstd{detL\ }\hlopt{{*}\ }\hlstd{detL}\hlopt{,}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{Named}\hlopt{{(}}\hlstd{}\hlstr{"d2"}\hlstd{}\hlopt{{)}\ =\ }\hlstd{Dvec}\hlopt{.}\hlstd{}\hlkwd{prod}\hlstd{}\hlopt{(),}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{Named}\hlopt{{(}}\hlstd{}\hlstr{"ld"}\hlstd{}\hlopt{{)}\ =\ }\hlstd{Dvec}\hlopt{.}\hlstd{}\hlkwd{array}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{log}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{sum}\hlstd{}\hlopt{());}\hlstd{}\hspace*{\fill}\\ \mbox{} \normalfont \normalsize \hrule \caption{\code{cholDetCpp}: Determinant of a cross-product using the ``LLt'' and ``LDLt'' forms of the Cholesky decomposition.} \label{cholDet} \end{figure} % \begin{CodeInput} R> fdet <- cxxfunction(signature(AA = "matrix"), cholDetCpp, "RcppEigen", + incl) R> unlist(ll <- fdet(A)) \end{CodeInput} \begin{CodeOutput} d1 d2 ld 54.00000 54.00000 3.98898 \end{CodeOutput} % Note the use of the \code{array()} method in the calculation of the log-determinant. Because the \code{log()} method applies to arrays, not to vectors or matrices, an array must be created from \code{Dvec} before applying the \code{log()} method. \section{Least squares solutions} \label{sec:leastSquares} A common operation in statistical computing is calculating a least squares solution, $\widehat{\bm\beta}$, defined as \begin{displaymath} \widehat{\bm\beta}=\argmin_{\bm \beta}\|\bm y-\bm X\bm\beta\|^2 \end{displaymath} where the model matrix, $\bm X$, is $n\times p$ ($n\ge p$) and $\bm y$ is an $n$-dimensional response vector. There are several ways, based on matrix decompositions, to determine such a solution. Earlier, two forms of the Cholesky decomposition were discussed: ``LLt'' and ``LDLt'', which can both be used to solve for $\widehat{\bm\beta}$. Other decompositions that can be used are the QR decomposition, with or without column pivoting, the singular value decomposition and the eigendecomposition of a symmetric matrix. Determining a least squares solution is relatively straightforward. However, statistical computing often requires additional information, such as the standard errors of the coefficient estimates. 
Calculating these involves evaluating the diagonal elements of $\left(\bm X^\top\bm X\right)^{-1}$ and the residual sum of squares, $\|\bm y-\bm X\widehat{\bm\beta}\|^2$. \subsection{Least squares using the ``LLt'' Cholesky} \label{sec:LLtLeastSquares} \begin{figure}[t!] \hrule \smallskip \noindent \ttfamily \hlstd{}\hlkwb{const\ }\hlstd{MapMatd}\hlstd{\ \ \ \ \ \ \ \ \ }\hlstd{}\hlkwd{X}\hlstd{}\hlopt{(}\hlstd{as}\hlopt{$<$}\hlstd{MapMatd}\hlopt{$>$(}\hlstd{XX}\hlopt{));}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{MapVecd}\hlstd{\ \ \ \ \ \ \ \ \ }\hlstd{}\hlkwd{y}\hlstd{}\hlopt{(}\hlstd{as}\hlopt{$<$}\hlstd{MapVecd}\hlopt{$>$(}\hlstd{yy}\hlopt{));}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ int}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ }\hlkwb{}\hlstd{}\hlkwd{n}\hlstd{}\hlopt{(}\hlstd{X}\hlopt{.}\hlstd{}\hlkwd{rows}\hlstd{}\hlopt{()),\ }\hlstd{}\hlkwd{p}\hlstd{}\hlopt{(}\hlstd{X}\hlopt{.}\hlstd{}\hlkwd{cols}\hlstd{}\hlopt{());}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{LLT}\hlopt{$<$}\hlstd{MatrixXd}\hlopt{$>$\ }\hlstd{}\hlkwd{llt}\hlstd{}\hlopt{(}\hlstd{}\hlkwd{AtA}\hlstd{}\hlopt{(}\hlstd{X}\hlopt{));}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{VectorXd}\hlstd{\ \ }\hlstd{}\hlkwd{betahat}\hlstd{}\hlopt{(}\hlstd{llt}\hlopt{.}\hlstd{}\hlkwd{solve}\hlstd{}\hlopt{(}\hlstd{X}\hlopt{.}\hlstd{}\hlkwd{adjoint}\hlstd{}\hlopt{()\ {*}\ }\hlstd{y}\hlopt{));}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{VectorXd}\hlstd{\ \ \ }\hlstd{}\hlkwd{fitted}\hlstd{}\hlopt{(}\hlstd{X\ }\hlopt{{*}\ }\hlstd{betahat}\hlopt{);}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{VectorXd}\hlstd{\ \ \ \ }\hlstd{}\hlkwd{resid}\hlstd{}\hlopt{(}\hlstd{y\ }\hlopt{{-}\ }\hlstd{fitted}\hlopt{);}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ int}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ }\hlkwb{}\hlstd{}\hlkwd{df}\hlstd{}\hlopt{(}\hlstd{n\ }\hlopt{{-}\ }\hlstd{p}\hlopt{);}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ double}\hlstd{\ \ \ \ \ \ \ \ \ \ }\hlkwb{}\hlstd{}\hlkwd{s}\hlstd{}\hlopt{(}\hlstd{resid}\hlopt{.}\hlstd{}\hlkwd{norm}\hlstd{}\hlopt{()\ /\ }\hlstd{std}\hlopt{::}\hlstd{}\hlkwd{sqrt}\hlstd{}\hlopt{(}\hlstd{}\hlkwb{double}\hlstd{}\hlopt{(}\hlstd{df}\hlopt{)));}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{VectorXd}\hlstd{\ \ \ \ \ \ \ }\hlstd{}\hlkwd{se}\hlstd{}\hlopt{(}\hlstd{s\ }\hlopt{{*}\ }\hlstd{llt}\hlopt{.}\hlstd{}\hlkwd{matrixL}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{solve}\hlstd{}\hlopt{(}\hlstd{MatrixXd}\hlopt{::}\hlstd{}\hlkwd{Identity}\hlstd{}\hlopt{(}\hlstd{p}\hlopt{,\ }\hlstd{p}\hlopt{))}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{}\hlopt{.}\hlstd{}\hlkwd{colwise}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{norm}\hlstd{}\hlopt{());}\hspace*{\fill}\\ \hlstd{}\hlkwa{return}\hlstd{\ \ \ \ \ }\hlkwa{}\hlstd{List}\hlopt{::}\hlstd{}\hlkwd{create}\hlstd{}\hlopt{(}\hlstd{Named}\hlopt{{(}}\hlstd{}\hlstr{"coefficients"}\hlstd{}\hlopt{{)}}\hlstd{\ \ \ }\hlopt{=\ }\hlstd{betahat}\hlopt{,}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{Named}\hlopt{{(}}\hlstd{}\hlstr{"fitted.values"}\hlstd{}\hlopt{{)}}\hlstd{\ \ }\hlopt{=\ }\hlstd{fitted}\hlopt{,}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{Named}\hlopt{{(}}\hlstd{}\hlstr{"residuals"}\hlstd{}\hlopt{{)}}\hlstd{\ \ \ \ \ \ }\hlopt{=\ }\hlstd{resid}\hlopt{,}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{Named}\hlopt{{(}}\hlstd{}\hlstr{"s"}\hlstd{}\hlopt{{)}}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlopt{=\ 
}\hlstd{s}\hlopt{,}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{Named}\hlopt{{(}}\hlstd{}\hlstr{"df.residual"}\hlstd{}\hlopt{{)}}\hlstd{\ \ \ \ }\hlopt{=\ }\hlstd{df}\hlopt{,}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{Named}\hlopt{{(}}\hlstd{}\hlstr{"rank"}\hlstd{}\hlopt{{)}}\hlstd{\ \ \ \ \ \ \ \ \ \ \ }\hlopt{=\ }\hlstd{p}\hlopt{,}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{Named}\hlopt{{(}}\hlstd{}\hlstr{"Std.\ Error"}\hlstd{}\hlopt{{)}}\hlstd{\ \ \ \ \ }\hlopt{=\ }\hlstd{se}\hlopt{);}\hlstd{}\hspace*{\fill}\\ \mbox{} \normalfont \normalsize \hrule \caption{\code{lltLSCpp}: Least squares using the Cholesky decomposition. \label{lltLS}} \end{figure} Figure~\ref{lltLS} shows a calculation of the least squares coefficient estimates (\code{betahat}) and the standard errors (\code{se}) through an ``LLt'' Cholesky decomposition of the crossproduct of the model matrix, $\bm X$. Next, the results from this calculation are compared to those from the \code{lm.fit} function in \proglang{R} (\code{lm.fit} is the workhorse function called by \code{lm} once the model matrix and response have been evaluated). \begin{CodeInput} R> lltLS <- cxxfunction(signature(XX = "matrix", yy = "numeric"), lltLSCpp, + "RcppEigen", incl) R> data("trees", package = "datasets") R> str(lltFit <- with(trees, lltLS(cbind(1, log(Girth)), log(Volume)))) \end{CodeInput} \begin{CodeOutput} List of 7 $ coefficients : num [1:2] -2.35 2.2 $ fitted.values: num [1:31] 2.3 2.38 2.43 2.82 2.86 ... $ residuals : num [1:31] 0.0298 -0.0483 -0.1087 -0.0223 0.0727 ... $ s : num 0.115 $ df.residual : int 29 $ rank : int 2 $ Std. Error : num [1:2] 0.2307 0.0898 R> str(lmFit <- with(trees, lm.fit(cbind(1, log(Girth)), log(Volume)))) List of 8 $ coefficients : Named num [1:2] -2.35 2.2 ..- attr(*, "names")= chr [1:2] "x1" "x2" $ residuals : num [1:31] 0.0298 -0.0483 -0.1087 -0.0223 0.0727 ... $ effects : Named num [1:31] -18.2218 2.8152 -0.1029 -0.0223 0.0721 ... ..- attr(*, "names")= chr [1:31] "x1" "x2" "" "" ... $ rank : int 2 $ fitted.values: num [1:31] 2.3 2.38 2.43 2.82 2.86 ... $ assign : NULL $ qr :List of 5 ..$ qr : num [1:31, 1:2] -5.57 0.18 0.18 0.18 0.18 ... ..$ qraux: num [1:2] 1.18 1.26 ..$ pivot: int [1:2] 1 2 ..$ tol : num 1e-07 ..$ rank : int 2 ..- attr(*, "class")= chr "qr" $ df.residual : int 29 \end{CodeOutput} \begin{CodeInput} R> for(nm in c("coefficients", "residuals", "fitted.values", "rank")) + stopifnot(all.equal(lltFit[[nm]], unname(lmFit[[nm]]))) R> stopifnot(all.equal(lltFit[["Std. Error"]], + unname(coef(summary(lm(log(Volume) ~ log(Girth), trees)))[,2]))) \end{CodeInput} There are several aspects of the \proglang{C++} code in Figure~\ref{lltLS} worth mentioning. The \code{solve} method for the \code{LLT} object evaluates, in this case, $\left(\bm X^\top\bm X\right)^{-1}\bm X^\top\bm y$ but without actually evaluating the inverse. The calculation of the residuals, $\bm y-\widehat{\bm y}$, can be written, as in \proglang{R}, as \code{y - fitted}. (But note that \pkg{Eigen} classes do not have a ``recycling rule'' as in \proglang{R}. That is, the two vector operands must have the same length.) The \code{norm()} method evaluates the square root of the sum of squares of the elements of a vector. Although one does not explicitly evaluate $\left(\bm X^\top\bm X\right)^{-1}$ one does evaluate $\bm L^{-1}$ to obtain the standard errors. 
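To see why the column norms of $\bm L^{-1}$ provide the standard errors, note that
\begin{displaymath}
\left(\bm X^\top\bm X\right)^{-1}=\left(\bm L\bm L^\top\right)^{-1}=\bm L^{-\top}\bm L^{-1} ,
\end{displaymath}
so the $j$th diagonal element of $\left(\bm X^\top\bm X\right)^{-1}$ is the squared Euclidean norm of the $j$th column of $\bm L^{-1}$. The expression for \code{se} in Figure~\ref{lltLS} therefore solves a triangular system for $\bm L^{-1}$, takes the norm of each column and scales by $s$.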
Note also the use of the \code{colwise()} method in the evaluation of the standard errors. It applies a method to the columns of a matrix, returning a vector. The \pkg{Eigen} \code{colwise()} and \code{rowwise()} methods are similar in effect to the \code{apply} function in \proglang{R}. In the descriptions of other methods for solving least squares problems, much of the code parallels that shown in Figure~\ref{lltLS}. The redundant parts are omitted, and only the evaluation of the coefficients, the rank and the standard errors is shown. Actually, the standard errors are calculated only up to the scalar multiple of $s$, the residual standard error, in these code fragments. The calculation of the residuals and $s$ and the scaling of the coefficient standard errors is the same for all methods. (See the files \code{fastLm.h} and \code{fastLm.cpp} in the \pkg{RcppEigen} source package for details.) \subsection{Least squares using the unpivoted QR decomposition} \label{sec:QR} A QR decomposition has the form \begin{displaymath} \bm X=\bm Q\bm R=\bm Q_1\bm R_1 \end{displaymath} where $\bm Q$ is an $n\times n$ orthogonal matrix, which means that $\bm Q^\top\bm Q=\bm Q\bm Q^\top=\bm I_n$, and the $n\times p$ matrix $\bm R$ is zero below the main diagonal. The $n\times p$ matrix $\bm Q_1$ is the first $p$ columns of $\bm Q$ and the $p\times p$ upper triangular matrix $\bm R_1$ is the top $p$ rows of $\bm R$. There are three \pkg{Eigen} classes for the QR decomposition: \code{HouseholderQR} provides the basic QR decomposition using Householder transformations, \code{ColPivHouseholderQR} incorporates column pivots and \code{FullPivHouseholderQR} incorporates both row and column pivots. Figure~\ref{QRLS} shows a least squares solution using the unpivoted QR decomposition. The calculations in Figure~\ref{QRLS} are quite similar to those in Figure~\ref{lltLS}. In fact, if one had extracted the upper triangular factor (the \code{matrixU()} method) from the \code{LLT} object in Figure~\ref{lltLS}, the rest of the code would be nearly identical. \begin{figure}[t!] 
\hrule \smallskip \noindent \ttfamily \hlstd{}\hlkwa{using\ }\hlstd{Eigen}\hlopt{::}\hlstd{HouseholderQR}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hspace*{\fill}\\ \hlkwb{const\ }\hlstd{HouseholderQR}\hlopt{$<$}\hlstd{MatrixXd}\hlopt{$>$\ }\hlstd{}\hlkwd{QR}\hlstd{}\hlopt{(}\hlstd{X}\hlopt{);}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{VectorXd\ }\hlkwd{betahat}\hlstd{}\hlopt{(}\hlstd{QR}\hlopt{.}\hlstd{}\hlkwd{solve}\hlstd{}\hlopt{(}\hlstd{y}\hlopt{));}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{VectorXd}\hlstd{\ \ }\hlstd{}\hlkwd{fitted}\hlstd{}\hlopt{(}\hlstd{X\ }\hlopt{{*}\ }\hlstd{betahat}\hlopt{);}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ int}\hlstd{\ \ \ \ \ \ \ \ \ \ \ }\hlkwb{}\hlstd{}\hlkwd{df}\hlstd{}\hlopt{(}\hlstd{n\ }\hlopt{{-}\ }\hlstd{p}\hlopt{);}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{VectorXd}\hlstd{\ \ \ \ \ \ }\hlstd{}\hlkwd{se}\hlstd{}\hlopt{(}\hlstd{QR}\hlopt{.}\hlstd{}\hlkwd{matrixQR}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{topRows}\hlstd{}\hlopt{(}\hlstd{p}\hlopt{).}\hlstd{triangularView}\hlopt{$<$}\hlstd{Upper}\hlopt{$>$()}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{}\hlopt{.}\hlstd{}\hlkwd{solve}\hlstd{}\hlopt{(}\hlstd{MatrixXd}\hlopt{::}\hlstd{}\hlkwd{Identity}\hlstd{}\hlopt{(}\hlstd{p}\hlopt{,}\hlstd{p}\hlopt{)).}\hlstd{}\hlkwd{rowwise}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{norm}\hlstd{}\hlopt{());}\hlstd{}\hspace*{\fill}\\ \mbox{} \normalfont \normalsize \hrule \caption{\code{QRLSCpp}: Least squares using the unpivoted QR decomposition. \label{QRLS}} \end{figure} \subsection{Handling the rank-deficient case} \label{sec:rankdeficient} One important consideration when determining least squares solutions is whether $\rank(\bm X)$ is $p$, a situation described by saying that $\bm X$ has ``full column rank''. When $\bm X$ does not have full column rank it is said to be ``rank deficient''. Although the theoretical rank of a matrix is well-defined, its evaluation in practice is not. At best one can compute an effective rank according to some tolerance. Decompositions that allow to estimation of the rank of the matrix in this way are said to be ``rank-revealing''. Because the \code{model.matrix} function in \proglang{R} does a considerable amount of symbolic analysis behind the scenes, one usually ends up with full-rank model matrices. The common cases of rank-deficiency, such as incorporating both a constant term and a full set of indicators columns for the levels of a factor, are eliminated. Other, more subtle, situations will not be detected at this stage, however. A simple example occurs when there is a ``missing cell'' in a two-way layout and the interaction of the two factors is included in the model. \begin{CodeInput} R> dd <- data.frame(f1 = gl(4, 6, labels = LETTERS[1:4]), + f2 = gl(3, 2, labels = letters[1:3]))[-(7:8), ] R> xtabs(~ f2 + f1, dd) \end{CodeInput} \begin{CodeOutput} f1 f2 A B C D a 2 0 2 2 b 2 2 2 2 c 2 2 2 2 \end{CodeOutput} \begin{CodeInput} R> mm <- model.matrix(~ f1 * f2, dd) R> kappa(mm) \end{CodeInput} \begin{CodeOutput} [1] 4.30923e+16 \end{CodeOutput} This yields a large condition number, indicating rank deficiency. Alternatively, the reciprocal condition number can be evaluated. 
\begin{CodeInput} R> rcond(mm) \end{CodeInput} \begin{CodeOutput} [1] 2.3206e-17 \end{CodeOutput} \begin{CodeInput} R> (c(rank = qr(mm)$rank, p = ncol(mm))) \end{CodeInput} \begin{CodeOutput} rank p 11 12 \end{CodeOutput} \begin{CodeInput} R> set.seed(1) R> dd$y <- mm %*% seq_len(ncol(mm)) + rnorm(nrow(mm), sd = 0.1) R> fm1 <- lm(y ~ f1 * f2, dd) R> writeLines(capture.output(print(summary(fm1), + signif.stars = FALSE))[9:22]) \end{CodeInput} \begin{CodeOutput} Coefficients: (1 not defined because of singularities) Estimate Std. Error t value Pr(>|t|) (Intercept) 0.9779 0.0582 16.8 3.4e-09 f1B 12.0381 0.0823 146.3 < 2e-16 f1C 3.1172 0.0823 37.9 5.2e-13 f1D 4.0685 0.0823 49.5 2.8e-14 f2b 5.0601 0.0823 61.5 2.6e-15 f2c 5.9976 0.0823 72.9 4.0e-16 f1B:f2b -3.0148 0.1163 -25.9 3.3e-11 f1C:f2b 7.7030 0.1163 66.2 1.2e-15 f1D:f2b 8.9643 0.1163 77.1 < 2e-16 f1B:f2c NA NA NA NA f1C:f2c 10.9613 0.1163 94.2 < 2e-16 f1D:f2c 12.0411 0.1163 103.5 < 2e-16 \end{CodeOutput} The \code{lm} function for fitting linear models in \proglang{R} uses a rank-revealing form of the QR decomposition. When the model matrix is determined to be rank deficient, according to the threshold used in \proglang{R}'s \code{qr} function, the model matrix is reduced to $\rank{(\bm X)}$ columns by pivoting selected columns (those that are apparently linearly dependent on columns to their left) to the right hand side of the matrix. A solution for this reduced model matrix is determined and the coefficients and standard errors for the redundant columns are flagged as missing. An alternative approach is to evaluate the ``pseudo-inverse'' of $\bm X$ from the singular value decomposition (SVD) of $\bm X$ or the eigendecomposition of $\bm X^\top\bm X$. The SVD is of the form \begin{displaymath} \bm X=\bm U\bm D\bm V^\top=\bm U_1\bm D_1\bm V^\top \end{displaymath} where $\bm U$ is an orthogonal $n\times n$ matrix and $\bm U_1$ is its leftmost $p$ columns, $\bm D$ is $n\times p$ and zero off the main diagonal so that $\bm D_1$ is a $p\times p$ diagonal matrix with non-increasing, non-negative diagonal elements, and $\bm V$ is a $p\times p$ orthogonal matrix. The pseudo-inverse of $\bm D_1$, written $\bm D_1^+$ is a $p\times p$ diagonal matrix whose first $r=\rank(\bm X)$ diagonal elements are the inverses of the corresponding diagonal elements of $\bm D_1$ and whose last $p-r$ diagonal elements are zero. The tolerance for determining if an element of the diagonal of $\bm D_1$ is considered to be (effectively) zero is a multiple of the largest singular value (i.e., the $(1,1)$ element of $\bm D$). The pseudo-inverse, $\bm X^+$, of $\bm X$ is defined as \begin{displaymath} \bm X^+=\bm V\bm D_1^+\bm U_1^\top . \end{displaymath} \begin{figure}[t!] 
\hrule \smallskip \noindent \ttfamily \hlstd{}\hlkwc{inline\ }\hlstd{ArrayXd\ }\hlkwd{Dplus}\hlstd{}\hlopt{(}\hlstd{}\hlkwb{const\ }\hlstd{ArrayXd}\hlopt{\&\ }\hlstd{d}\hlopt{)\ \{}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{ArrayXd}\hlstd{\ \ \ }\hlstd{}\hlkwd{di}\hlstd{}\hlopt{(}\hlstd{d}\hlopt{.}\hlstd{}\hlkwd{size}\hlstd{}\hlopt{());}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{}\hlkwb{double}\hlstd{\ \ }\hlkwb{}\hlstd{}\hlkwd{comp}\hlstd{}\hlopt{(}\hlstd{d}\hlopt{.}\hlstd{}\hlkwd{maxCoeff}\hlstd{}\hlopt{()\ {*}\ }\hlstd{}\hlkwd{threshold}\hlstd{}\hlopt{());}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{}\hlkwa{for\ }\hlstd{}\hlopt{(}\hlstd{}\hlkwb{int\ }\hlstd{j\ }\hlopt{=\ }\hlstd{}\hlnum{0}\hlstd{}\hlopt{;\ }\hlstd{j\ }\hlopt{$<$\ }\hlstd{d}\hlopt{.}\hlstd{}\hlkwd{size}\hlstd{}\hlopt{();\ ++}\hlstd{j}\hlopt{)\ }\hlstd{di}\hlopt{{[}}\hlstd{j}\hlopt{{]}\ =\ (}\hlstd{d}\hlopt{{[}}\hlstd{j}\hlopt{{]}\ $<$\ }\hlstd{comp}\hlopt{)\ }\hlstd{?\ }\hlnum{0}\hlstd{}\hlopt{.\ :\ }\hlstd{}\hlnum{1}\hlstd{}\hlopt{./}\hlstd{d}\hlopt{{[}}\hlstd{j}\hlopt{{]};}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{}\hlkwa{return\ }\hlstd{di}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlopt{\}}\hlstd{}\hspace*{\fill}\\ \mbox{} \normalfont \normalsize \hrule \caption{\code{DplusCpp}: Create the diagonal $\bm d^+$ of the pseudo-inverse, $\bm D_1^+$, from the array of singular values, $\bm d$. \label{Dplus}} \end{figure} In Figure~\ref{Dplus} a utility function, \code{Dplus}, is defined to return the diagonal of the pseudo-inverse, $\bm D_1^+$, as an array, given the singular values (the diagonal of $\bm D_1$) as an array. Calculation of the maximum element of $\bm d$ (the method is called \code{.maxCoeff()}) and the use of a \code{threshold()} function provides greater generality for the function. It can be used on the eigenvalues of $\bm X^\top\bm X$, as shown in Section~\ref{sec:eigendecomp}, even though these are returned in increasing order, as opposed to the decreasing order of the singular values. \subsection{Least squares using the SVD} \label{sec:SVDls} \begin{figure}[b!] 
\hrule \smallskip \noindent \ttfamily \hlkwb{const\ }\hlstd{Eigen}\hlopt{::}\hlstd{JacobiSVD}\hlopt{$<$}\hlstd{MatrixXd}\hlopt{$>$}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ }\hlstd{}\hlkwd{UDV}\hlstd{}\hlopt{(}\hlstd{X}\hlopt{.}\hlstd{}\hlkwd{jacobiSvd}\hlstd{}\hlopt{(}\hlstd{Eigen}\hlopt{::}\hlstd{ComputeThinU}\hlopt{\textbar }\hlstd{Eigen}\hlopt{::}\hlstd{ComputeThinV}\hlopt{));}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{ArrayXd}\hlstd{\ \ \ \ \ \ \ }\hlstd{}\hlkwd{Dp}\hlstd{}\hlopt{(}\hlstd{}\hlkwd{Dplus}\hlstd{}\hlopt{(}\hlstd{UDV}\hlopt{.}\hlstd{}\hlkwd{singularValues}\hlstd{}\hlopt{()));}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ int}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ }\hlkwb{}\hlstd{}\hlkwd{r}\hlstd{}\hlopt{((}\hlstd{Dp\ }\hlopt{$>$\ }\hlstd{}\hlnum{0}\hlstd{}\hlopt{).}\hlstd{}\hlkwd{count}\hlstd{}\hlopt{());}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{MatrixXd}\hlstd{\ \ \ \ \ }\hlstd{}\hlkwd{VDp}\hlstd{}\hlopt{(}\hlstd{UDV}\hlopt{.}\hlstd{}\hlkwd{matrixV}\hlstd{}\hlopt{()\ {*}\ }\hlstd{Dp}\hlopt{.}\hlstd{}\hlkwd{matrix}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{asDiagonal}\hlstd{}\hlopt{());}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{VectorXd\ }\hlkwd{betahat}\hlstd{}\hlopt{(}\hlstd{VDp\ }\hlopt{{*}\ }\hlstd{UDV}\hlopt{.}\hlstd{}\hlkwd{matrixU}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{adjoint}\hlstd{}\hlopt{()\ {*}\ }\hlstd{y}\hlopt{);}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{VectorXd}\hlstd{\ \ \ \ \ \ }\hlstd{}\hlkwd{se}\hlstd{}\hlopt{(}\hlstd{s\ }\hlopt{{*}\ }\hlstd{VDp}\hlopt{.}\hlstd{}\hlkwd{rowwise}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{norm}\hlstd{}\hlopt{());}\hlstd{}\hspace*{\fill}\\ \mbox{} \normalfont \normalsize \hrule \caption{\code{SVDLSCpp}: Least squares using the SVD. \label{SVDLS}} \end{figure} With these definitions the code for least squares using the singular value decomposition can be written as in Figure~\ref{SVDLS}. Even in the rank-deficient case this code will produce a complete set of coefficients and their standard errors. The user must be careful to check if the computed rank is less than $p$, the number of columns in $\bm X$, in which case the estimated coefficients are just one of an infinite number of coefficient vectors that produce the same fitted values. It happens that the solution from this pseudo-inverse is the minimum norm solution (in the sense that $\|\bm\beta\|^2$ is minimized among those $\bm\beta$ that minimize $\|\bm y-\bm X\bm\beta\|^2$). The standard errors of the coefficient estimates in the rank-deficient case must be interpreted carefully. The solution with one or more missing coefficients, as returned by the \code{lm.fit} function in \proglang{R} and by the column-pivoted QR decomposition described in Section~\ref{sec:colPivQR}, does not provide standard errors for the missing coefficients. That is, both the coefficient and its standard error are returned as \code{NA} because the least squares solution is performed on a reduced model matrix. It is also true that the solution returned by the SVD method is with respect to a reduced model matrix but the $p$ coefficient estimates and their $p$ standard errors don't show this. They are, in fact, linear combinations of a set of $r$ coefficient estimates and their standard errors. 
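In terms of the quantities computed in Figure~\ref{SVDLS}, the pseudo-inverse gives $\widehat{\bm\beta}=\bm V\bm D_1^+\bm U_1^\top\bm y$ and
\begin{displaymath}
\left(\bm X^\top\bm X\right)^+=\bm V\left(\bm D_1^+\right)^2\bm V^\top ,
\end{displaymath}
whose $j$th diagonal element is the squared norm of the $j$th row of $\bm V\bm D_1^+$ (the matrix called \code{VDp} in the figure); the standard errors are therefore again row-wise norms scaled by $s$.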
\pagebreak \subsection{Least squares using the eigendecomposition} \label{sec:eigendecomp} The eigendecomposition of $\bm X^\top\bm X$ is defined as \begin{displaymath} \bm X^\top\bm X=\bm V\bm\Lambda\bm V^\top \end{displaymath} where $\bm V$, the matrix of eigenvectors, is a $p\times p$ orthogonal matrix and $\bm\Lambda$ is a $p\times p$ diagonal matrix with non-decreasing, non-negative diagonal elements, called the eigenvalues of $\bm X^\top\bm X$. When the eigenvalues are distinct, this $\bm V$ is the same (up to reordering of the columns) as that in the SVD and the eigenvalues of $\bm X^\top\bm X$ are the (reordered) squares of the singular values of $\bm X$. With these definitions one can adapt much of the code from the SVD method for the eigendecomposition, as shown in Figure~\ref{SymmEigLS}. \begin{figure}[t!] \hrule \smallskip \noindent \ttfamily \hlkwb{const\ }\hlstd{Eigen}\hlopt{::}\hlstd{SelfAdjointEigenSolver}\hlopt{$<$}\hlstd{MatrixXd}\hlopt{$>$\ }\hlstd{}\hlkwd{VLV}\hlstd{}\hlopt{(}\hlstd{}\hlkwd{AtA}\hlstd{}\hlopt{(}\hlstd{X}\hlopt{));}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{ArrayXd}\hlstd{\ \ \ \ \ \ \ }\hlstd{}\hlkwd{Dp}\hlstd{}\hlopt{(}\hlstd{}\hlkwd{Dplus}\hlstd{}\hlopt{(}\hlstd{eig}\hlopt{.}\hlstd{}\hlkwd{eigenvalues}\hlstd{}\hlopt{()).}\hlstd{}\hlkwd{sqrt}\hlstd{}\hlopt{());}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ int}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ }\hlkwb{}\hlstd{}\hlkwd{r}\hlstd{}\hlopt{((}\hlstd{Dp\ }\hlopt{$>$\ }\hlstd{}\hlnum{0}\hlstd{}\hlopt{).}\hlstd{}\hlkwd{count}\hlstd{}\hlopt{());}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{MatrixXd}\hlstd{\ \ \ \ \ }\hlstd{}\hlkwd{VDp}\hlstd{}\hlopt{(}\hlstd{VLV}\hlopt{.}\hlstd{}\hlkwd{eigenvectors}\hlstd{}\hlopt{()\ {*}\ }\hlstd{Dp}\hlopt{.}\hlstd{}\hlkwd{matrix}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{asDiagonal}\hlstd{}\hlopt{());}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{VectorXd\ }\hlkwd{betahat}\hlstd{}\hlopt{(}\hlstd{VDp\ }\hlopt{{*}\ }\hlstd{VDp}\hlopt{.}\hlstd{}\hlkwd{adjoint}\hlstd{}\hlopt{()\ {*}\ }\hlstd{X}\hlopt{.}\hlstd{}\hlkwd{adjoint}\hlstd{}\hlopt{()\ {*}\ }\hlstd{y}\hlopt{);}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{VectorXd}\hlstd{\ \ \ \ \ \ }\hlstd{}\hlkwd{se}\hlstd{}\hlopt{(}\hlstd{s\ }\hlopt{{*}\ }\hlstd{VDp}\hlopt{.}\hlstd{}\hlkwd{rowwise}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{norm}\hlstd{}\hlopt{());}\hlstd{}\hspace*{\fill}\\ \mbox{} \normalfont \normalsize \hrule \caption{\code{SymmEigLSCpp}: Least squares using the eigendecomposition. \label{SymmEigLS}} \end{figure} \subsection{Least squares using the column-pivoted QR decomposition} \label{sec:colPivQR} The column-pivoted QR decomposition provides results similar to those from \proglang{R} in both the full-rank and the rank-deficient cases. The decomposition is of the form \begin{displaymath} \bm X\bm P=\bm Q\bm R=\bm Q_1\bm R_1 \end{displaymath} where, as before, $\bm Q$ is $n\times n$ and orthogonal and $\bm R$ is $n\times p$ and upper triangular. The $p\times p$ matrix $\bm P$ is a permutation matrix. That is, its columns are a permutation of the columns of $\bm I_p$. It serves to reorder the columns of $\bm X$ so that the diagonal elements of $\bm R$ are non-increasing in magnitude. An instance of the class \code{Eigen::ColPivHouseholderQR} has a \code{rank()} method returning the computational rank of the matrix. When $\bm X$ is of full rank one can use essentially the same code as in the unpivoted decomposition except that one must reorder the standard errors. 
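Because $\bm X=\bm Q_1\bm R_1\bm P^\top$ in this decomposition,
\begin{displaymath}
\left(\bm X^\top\bm X\right)^{-1}=\bm P\,\bm R_1^{-1}\bm R_1^{-\top}\bm P^\top ,
\end{displaymath}
so the unscaled standard errors are the row norms of $\bm R_1^{-1}$, permuted back to the original column order. Applying the permutation \code{Pmat} serves this purpose in the full-rank branch of Figure~\ref{ColPivQRLS}.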
When $\bm X$ is rank-deficient, the coefficients and standard errors are evaluated for the leading $r$ columns of $\bm X\bm P$ only. In the rank-deficient case the straightforward calculation of the fitted values, as $\bm X\widehat{\bm\beta}$, cannot be used because some of the estimated coefficients, $\widehat{\bm\beta}$, are \code{NA} so the product will be a vector of \code{NA}'s. One could do some complicated rearrangement of the columns of X and the coefficient estimates but it is conceptually (and computationally) easier to employ the relationship \begin{displaymath} \widehat{\bm y} = \bm Q_1\bm Q_1^\top\bm y=\bm Q \begin{bmatrix} \bm I_r & \bm 0\\ \bm 0 & \bm 0 \end{bmatrix} \bm Q^\top\bm y \end{displaymath} The vector $\bm Q^\top\bm y$ is called the ``effects'' vector in \proglang{R}. \begin{figure}[t!] \hrule \smallskip \noindent \ttfamily \hlstd{}\hlkwc{typedef\ }\hlstd{Eigen}\hlopt{::}\hlstd{ColPivHouseholderQR}\hlopt{$<$}\hlstd{MatrixXd}\hlopt{$>$}\hlstd{\ \ }\hlopt{}\hlstd{CPivQR}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwc{typedef\ }\hlstd{CPivQR}\hlopt{::}\hlstd{PermutationType}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{Permutation}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hspace*{\fill}\\ \hlkwb{const\ }\hlstd{CPivQR}\hlstd{\ \ \ \ \ \ \ }\hlstd{}\hlkwd{PQR}\hlstd{}\hlopt{(}\hlstd{X}\hlopt{);}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{Permutation\ }\hlkwd{Pmat}\hlstd{}\hlopt{(}\hlstd{PQR}\hlopt{.}\hlstd{}\hlkwd{colsPermutation}\hlstd{}\hlopt{());}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ int}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ }\hlkwb{}\hlstd{}\hlkwd{r}\hlstd{}\hlopt{(}\hlstd{PQR}\hlopt{.}\hlstd{}\hlkwd{rank}\hlstd{}\hlopt{());}\hspace*{\fill}\\ \hlstd{VectorXd}\hlstd{\ \ \ \ \ \ \ }\hlstd{betahat}\hlopt{,\ }\hlstd{fitted}\hlopt{,\ }\hlstd{se}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwa{if\ }\hlstd{}\hlopt{(}\hlstd{r\ }\hlopt{==\ }\hlstd{X}\hlopt{.}\hlstd{}\hlkwd{cols}\hlstd{}\hlopt{())\ \{\ }\hlstd{}\hlslc{//\ full\ rank\ case}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{betahat\ }\hlopt{=\ }\hlstd{PQR}\hlopt{.}\hlstd{}\hlkwd{solve}\hlstd{}\hlopt{(}\hlstd{y}\hlopt{);}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{fitted}\hlstd{\ \ }\hlstd{}\hlopt{=\ }\hlstd{X\ }\hlopt{{*}\ }\hlstd{betahat}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{se}\hlstd{\ \ \ \ \ \ }\hlstd{}\hlopt{=\ }\hlstd{Pmat\ }\hlopt{{*}\ }\hlstd{PQR}\hlopt{.}\hlstd{}\hlkwd{matrixQR}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{topRows}\hlstd{}\hlopt{(}\hlstd{p}\hlopt{).}\hlstd{triangularView}\hlopt{$<$}\hlstd{Upper}\hlopt{$>$()}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{}\hlopt{.}\hlstd{}\hlkwd{solve}\hlstd{}\hlopt{(}\hlstd{MatrixXd}\hlopt{::}\hlstd{}\hlkwd{Identity}\hlstd{}\hlopt{(}\hlstd{p}\hlopt{,\ }\hlstd{p}\hlopt{)).}\hlstd{}\hlkwd{rowwise}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{norm}\hlstd{}\hlopt{();}\hspace*{\fill}\\ \hlstd{}\hlopt{\}\ }\hlstd{}\hlkwa{else\ }\hlstd{}\hlopt{\{}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{MatrixXd}\hlstd{\ \ \ \ }\hlstd{}\hlkwd{Rinv}\hlstd{}\hlopt{(}\hlstd{PQR}\hlopt{.}\hlstd{}\hlkwd{matrixQR}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{topLeftCorner}\hlstd{}\hlopt{(}\hlstd{r}\hlopt{,\ }\hlstd{r}\hlopt{)}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ }\hlstd{}\hlopt{.}\hlstd{triangularView}\hlopt{$<$}\hlstd{Upper}\hlopt{$>$().}\hlstd{}\hlkwd{solve}\hlstd{}\hlopt{(}\hlstd{MatrixXd}\hlopt{::}\hlstd{}\hlkwd{Identity}\hlstd{}\hlopt{(}\hlstd{r}\hlopt{,\ }\hlstd{r}\hlopt{)));}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{VectorXd\ 
}\hlkwd{effects}\hlstd{}\hlopt{(}\hlstd{PQR}\hlopt{.}\hlstd{}\hlkwd{householderQ}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{adjoint}\hlstd{}\hlopt{()\ {*}\ }\hlstd{y}\hlopt{);}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{betahat}\hlopt{.}\hlstd{}\hlkwd{fill}\hlstd{}\hlopt{(::}\hlstd{NA\textunderscore REAL}\hlopt{);}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{betahat}\hlopt{.}\hlstd{}\hlkwd{head}\hlstd{}\hlopt{(}\hlstd{r}\hlopt{)}\hlstd{\ \ }\hlopt{=\ }\hlstd{Rinv\ }\hlopt{{*}\ }\hlstd{effects}\hlopt{.}\hlstd{}\hlkwd{head}\hlstd{}\hlopt{(}\hlstd{r}\hlopt{);}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{betahat}\hlstd{\ \ \ \ \ \ \ \ \ \ }\hlstd{}\hlopt{=\ }\hlstd{Pmat\ }\hlopt{{*}\ }\hlstd{betahat}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{se}\hlopt{.}\hlstd{}\hlkwd{fill}\hlstd{}\hlopt{(::}\hlstd{NA\textunderscore REAL}\hlopt{);}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{se}\hlopt{.}\hlstd{}\hlkwd{head}\hlstd{}\hlopt{(}\hlstd{r}\hlopt{)}\hlstd{\ \ \ \ \ \ \ }\hlopt{=\ }\hlstd{Rinv}\hlopt{.}\hlstd{}\hlkwd{rowwise}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{norm}\hlstd{}\hlopt{();}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{se}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{}\hlopt{=\ }\hlstd{Pmat\ }\hlopt{{*}\ }\hlstd{se}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{}\hlslc{//\ create\ fitted\ values\ from\ effects}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{effects}\hlopt{.}\hlstd{}\hlkwd{tail}\hlstd{}\hlopt{(}\hlstd{X}\hlopt{.}\hlstd{}\hlkwd{rows}\hlstd{}\hlopt{()\ {-}\ }\hlstd{r}\hlopt{).}\hlstd{}\hlkwd{setZero}\hlstd{}\hlopt{();}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ }\hlstd{fitted}\hlstd{\ \ \ \ \ \ \ \ \ \ \ }\hlstd{}\hlopt{=\ }\hlstd{PQR}\hlopt{.}\hlstd{}\hlkwd{householderQ}\hlstd{}\hlopt{()\ {*}\ }\hlstd{effects}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlopt{\}}\hlstd{}\hspace*{\fill}\\ \mbox{} \normalfont \normalsize \hrule \caption{\code{ColPivQRLSCpp}: Least squares using the pivoted QR decomposition. \label{ColPivQRLS}} \end{figure} Just to check that the code in Figure~\ref{ColPivQRLS} does indeed provide the desired answer \begin{CodeInput} R> print(summary(fmPQR <- fastLm(y ~ f1 * f2, dd)), signif.st = FALSE) \end{CodeInput} \begin{CodeOutput} Call: fastLm.formula(formula = y ~ f1 * f2, data = dd) Estimate Std. Error t value Pr(>|t|) (Intercept) 0.977859 0.058165 16.812 3.413e-09 f1B 12.038068 0.082258 146.346 < 2.2e-16 f1C 3.117222 0.082258 37.896 5.221e-13 f1D 4.068523 0.082258 49.461 2.833e-14 f2b 5.060123 0.082258 61.516 2.593e-15 f2c 5.997592 0.082258 72.912 4.015e-16 f1B:f2b -3.014763 0.116330 -25.916 3.266e-11 f1C:f2b 7.702999 0.116330 66.217 1.156e-15 f1D:f2b 8.964251 0.116330 77.059 < 2.2e-16 f1B:f2c NA NA NA NA f1C:f2c 10.961326 0.116330 94.226 < 2.2e-16 f1D:f2c 12.041081 0.116330 103.508 < 2.2e-16 Residual standard error: 0.2868 on 11 degrees of freedom Multiple R-squared: 0.9999, Adjusted R-squared: 0.9999 \end{CodeOutput} \begin{CodeInput} R> all.equal(coef(fm1), coef(fmPQR)) \end{CodeInput} \begin{CodeOutput} [1] TRUE \end{CodeOutput} \begin{CodeInput} R> all.equal(unname(fitted(fm1)), fitted(fmPQR)) \end{CodeInput} \begin{CodeOutput} [1] TRUE \end{CodeOutput} \begin{CodeInput} R> all.equal(unname(residuals(fm1)), residuals(fmPQR)) \end{CodeInput} \begin{CodeOutput} [1] TRUE \end{CodeOutput} The rank-revealing SVD method produces the same fitted values but not the same coefficients. 
\begin{CodeInput} R> print(summary(fmSVD <- fastLm(y ~ f1 * f2, dd, method = 4)), + signif.st = FALSE) \end{CodeInput} \begin{CodeOutput} Call: fastLm.formula(formula = y ~ f1 * f2, data = dd, method = 4) Estimate Std. Error t value Pr(>|t|) (Intercept) 0.977859 0.058165 16.812 3.413e-09 f1B 7.020458 0.038777 181.049 < 2.2e-16 f1C 3.117222 0.082258 37.896 5.221e-13 f1D 4.068523 0.082258 49.461 2.833e-14 f2b 5.060123 0.082258 61.516 2.593e-15 f2c 5.997592 0.082258 72.912 4.015e-16 f1B:f2b 2.002847 0.061311 32.667 2.638e-12 f1C:f2b 7.702999 0.116330 66.217 1.156e-15 f1D:f2b 8.964251 0.116330 77.059 < 2.2e-16 f1B:f2c 5.017610 0.061311 81.838 < 2.2e-16 f1C:f2c 10.961326 0.116330 94.226 < 2.2e-16 f1D:f2c 12.041081 0.116330 103.508 < 2.2e-16 Residual standard error: 0.2868 on 11 degrees of freedom Multiple R-squared: 0.9999, Adjusted R-squared: 0.9999 \end{CodeOutput} \begin{CodeInput} R> all.equal(coef(fm1), coef(fmSVD)) \end{CodeInput} \begin{CodeOutput} [1] "'is.NA' value mismatch: 0 in current 1 in target" \end{CodeOutput} \begin{CodeInput} R> all.equal(unname(fitted(fm1)), fitted(fmSVD)) \end{CodeInput} \begin{CodeOutput} [1] TRUE \end{CodeOutput} \begin{CodeInput} R> all.equal(unname(residuals(fm1)), residuals(fmSVD)) \end{CodeInput} \begin{CodeOutput} [1] TRUE \end{CodeOutput} The coefficients from the symmetric eigendecomposition method are the same as those from the SVD, hence the fitted values and residuals must be the same for these two methods. \begin{CodeInput} R> summary(fmVLV <- fastLm(y ~ f1 * f2, dd, method = 5)) R> all.equal(coef(fmSVD), coef(fmVLV)) \end{CodeInput} \begin{CodeOutput} [1] TRUE \end{CodeOutput} \subsection{Comparative speed} In the \pkg{RcppEigen} package the \proglang{R} function to fit linear models using the methods described above is called \code{fastLm}. It follows an earlier example in the \pkg{Rcpp} package which was carried over to both \pkg{RcppArmadillo} and \pkg{RcppGSL}. The natural question to ask is, ``Is it indeed fast to use these methods based on \pkg{Eigen}?''. To this end, the package provides benchmarking code for these methods, \proglang{R}'s \code{lm.fit} function and the \code{fastLm} implementations in the \pkg{RcppArmadillo} \citep{CRAN:RcppArmadillo} and \pkg{RcppGSL} \citep{CRAN:RcppGSL} packages, if they are installed. The benchmark code, which uses the \pkg{rbenchmark} \citep{CRAN:rbenchmark} package, is in a file named \code{lmBenchmark.R} in the \code{examples} subdirectory of the installed \pkg{RcppEigen} package. It can be run as \begin{CodeInput} R> source(system.file("examples", "lmBenchmark.R", package = "RcppEigen")) \end{CodeInput} Results will vary according to the speed of the processor and the implementation of the BLAS (Basic Linear Algebra Subroutines) used. (\pkg{Eigen} methods do not use the BLAS but the other methods do.) The \pkg{Eigen}3 template library does not use multi-threaded code for these operations but does use the graphics pipeline instructions (SSE and SSE2, in this case) in some calculations. \begin{table}[t!] 
\centering \begin{tabular}{r r r r r} \hline Method & Relative& Elapsed & User & Sys \\ \hline LDLt & 1.00 & 1.18 & 1.17 & 0.00 \\ LLt & 1.01 & 1.19 & 1.17 & 0.00 \\ SymmEig & 2.76 & 3.25 & 2.70 & 0.52 \\ QR & 6.35 & 7.47 & 6.93 & 0.53 \\ arma & 6.60 & 7.76 & 25.69 & 4.47 \\ PivQR & 7.15 & 8.41 & 7.78 & 0.62 \\ lm.fit & 11.68 & 13.74 & 21.56 & 16.79 \\ GESDD & 12.58 & 14.79 & 44.01 & 10.96 \\ SVD & 44.48 & 52.30 & 51.38 & 0.80 \\ GSL & 150.46 & 176.95 & 210.52 & 149.86 \\ \hline \end{tabular} \caption{\code{lmBenchmark} results on a desktop computer for the default size, $100,000\times 40$, full-rank model matrix running 20 repetitions for each method. Times (Elapsed, User, and Sys) are in seconds. The BLAS in use is a locally-rebuilt version of the OpenBLAS library included with Ubuntu 11.10. \label{tab:lmRes}} \end{table} Results obtained on a desktop computer, circa 2010, are shown in Table~\ref{tab:lmRes}. The processor used for these timings is a 4-core processor but almost all the methods are single-threaded and not affected by the number of cores. Only the \code{arma}, \code{lm.fit}, \code{GESDD} and \code{GSL} methods benefit from the multi-threaded BLAS implementation provided by OpenBLAS, and the relative speed increase is modest for this problem size and number of cores (at 7.76 seconds relative to 10.29 seconds for \code{arma}, 13.74 seconds relative to 16.91 seconds for \code{lm.fit}, and 176.95 seconds relative to 193.29 seconds for the \code{GSL}). Parallel computing approaches will always have to trade-off increased communication and overhead costs against the potential gains from running multiple execution threads. % Nonetheless, with the ongoing %shift to multi-core architectures and an ever increasing number of cores in %modern processing units, it is conceivable that \pkg{Eigen} may also switch %to a multi-threaded approach to further improve its performance. These results indicate that methods based on forming and decomposing $\bm X^\top\bm X$ (LDLt, LLt and SymmEig) are considerably faster than the others. The SymmEig method, using a rank-revealing decomposition, would be preferred, although the LDLt method could probably be modified to be rank-revealing. However, the dimensions of the problem will influence the comparative results. Because there are 100,000 rows in $\bm X$, methods that decompose the whole $\bm X$ matrix (all the methods except those named above) will be at a disadvantage. The pivoted QR method is 1.6 times faster than \proglang{R}'s \code{lm.fit} on this test and provides nearly the same information as \code{lm.fit}. Methods based on the singular value decomposition (SVD and GSL) are much slower but, as mentioned above, this is caused in part by $\bm X$ having many more rows than columns. The GSL method from the GNU Scientific Library uses an older algorithm for the SVD and is clearly out of contention. The \code{GESDD} method provides an interesting hybrid: It uses the \pkg{Eigen} classes, but then deploys the LAPACK routine \code{dgesdd} for the actual SVD calculation. The resulting time is much faster than using the SVD implementation of \pkg{Eigen} which is not a particularly fast SVD method. \section{Delayed evaluation} \label{sec:delayed} A form of delayed evaluation is used in \pkg{Eigen}. That is, many operators and methods do not evaluate the result but instead return an ``expression object'' that is evaluated when needed. 
As an example, even though one writes the $\bm X^\top\bm X$ evaluation as \code{.rankUpdate(X.adjoint())} the \code{X.adjoint()} part is not evaluated immediately. The \code{rankUpdate} method detects that it has been passed a matrix that is to be used in its transposed form and evaluates the update by taking inner products of columns of $\bm X$ instead of rows of $\bm X^\top$. As \pkg{Eigen} has developed some of these unevaluated expressions have been modified. In \pkg{Eigen 3.1}, which is incorporated in version 0.3.1 of \pkg{RcppEigen}, the \code{.adjoint()} method applied to a real dense matrix copies the contents of the original matrix, flags it as row-major and interchanges the number of rows and columns. This is indeed the adjoint of the original matrix but, at one time, the \code{wrap} method for the \pkg{Eigen} dense matrix classes would fail on row-major matrices. \begin{figure}[t!] \hrule \smallskip \noindent \ttfamily \hlstd{}\hlkwb{const\ }\hlstd{MapMati}\hlstd{\ \ }\hlstd{}\hlkwd{A}\hlstd{}\hlopt{(}\hlstd{as}\hlopt{$<$}\hlstd{MapMati}\hlopt{$>$(}\hlstd{AA}\hlopt{));}\hspace*{\fill}\\ \hlstd{}\hlkwa{return\ }\hlstd{}\hlkwd{wrap}\hlstd{}\hlopt{(}\hlstd{A}\hlopt{.}\hlstd{}\hlkwd{transpose}\hlstd{}\hlopt{());}\hlstd{}\hspace*{\fill}\\ \mbox{} \normalfont \normalsize \hrule \caption{\code{badtransCpp}: Transpose producing a run-time error in early versions of \pkg{RcppEigen}. \label{badtrans}} \end{figure} In the code for the transpose of an integer matrix shown in Figure~\ref{trans} the transpose is assigned to a \code{MatrixXi} object before applying \code{wrap} to it. The assignment forces the evaluation as a column-major matrix. In early versions of the \pkg{RcppEigen} package if this step is skipped, as in Figure~\ref{badtrans}, the result would have been incorrect. \begin{CodeInput} R> Ai <- matrix(1:6, ncol = 2L) R> ftrans2 <- cxxfunction(signature(AA = "matrix"), badtransCpp, + "RcppEigen", incl) R> (At <- ftrans2(Ai)) \end{CodeInput} \begin{CodeOutput} [,1] [,2] [,3] [1,] 1 2 3 [2,] 4 5 6 \end{CodeOutput} Although the problem no longer persists for this particular example, the recommended practice is to first assign objects before wrapping them for return to \proglang{R}. \section{Sparse matrices} \label{sec:sparse} \begin{figure}[b!] 
\hrule \smallskip \noindent \ttfamily
\hlstd{}\hlkwa{using\ }\hlstd{Eigen}\hlopt{::}\hlstd{MappedSparseMatrix}\hlopt{;}\hspace*{\fill}\\
\hlstd{}\hlkwa{using\ }\hlstd{Eigen}\hlopt{::}\hlstd{SparseMatrix}\hlopt{;}\hspace*{\fill}\\
\hlstd{}\hspace*{\fill}\\
\hlkwb{const\ }\hlstd{MappedSparseMatrix}\hlopt{$<$}\hlstd{}\hlkwb{double}\hlstd{}\hlopt{$>$\ }\hlstd{}\hlkwd{A}\hlstd{}\hlopt{(}\hlstd{as}\hlopt{$<$}\hlstd{MappedSparseMatrix}\hlopt{$<$}\hlstd{}\hlkwb{double}\hlstd{}\hlopt{$>$\ $>$(}\hlstd{AA}\hlopt{));}\hspace*{\fill}\\
\hlstd{}\hlkwb{const\ }\hlstd{MapVecd}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{}\hlkwd{y}\hlstd{}\hlopt{(}\hlstd{as}\hlopt{$<$}\hlstd{MapVecd}\hlopt{$>$(}\hlstd{yy}\hlopt{));}\hspace*{\fill}\\
\hlstd{}\hlkwb{const\ }\hlstd{SparseMatrix}\hlopt{$<$}\hlstd{}\hlkwb{double}\hlstd{}\hlopt{$>$}\hlstd{\ \ \ \ \ \ }\hlopt{}\hlstd{}\hlkwd{At}\hlstd{}\hlopt{(}\hlstd{A}\hlopt{.}\hlstd{}\hlkwd{adjoint}\hlstd{}\hlopt{());}\hspace*{\fill}\\
\hlstd{}\hlkwa{return\ }\hlstd{List}\hlopt{::}\hlstd{}\hlkwd{create}\hlstd{}\hlopt{(}\hlstd{Named}\hlopt{{(}}\hlstd{}\hlstr{"At"}\hlstd{}\hlopt{{)}}\hlstd{\ \ }\hlopt{=\ }\hlstd{At}\hlopt{,}\hspace*{\fill}\\
\hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{Named}\hlopt{{(}}\hlstd{}\hlstr{"Aty"}\hlstd{}\hlopt{{)}\ =\ }\hlstd{At\ }\hlopt{{*}\ }\hlstd{y}\hlopt{);}\hlstd{}\hspace*{\fill}\\
\mbox{}
\normalfont \normalsize
\hrule \caption{\code{sparseProdCpp}: Transpose and product with sparse matrices. \label{sparseProd}}
\end{figure}
\pkg{Eigen} provides sparse matrix classes. An \proglang{R} object of class \code{dgCMatrix} (from the \pkg{Matrix} package by \citet{CRAN:Matrix}) can be mapped as in Figure~\ref{sparseProd}.
\begin{CodeInput}
R> sparse1 <- cxxfunction(signature(AA = "dgCMatrix", yy = "numeric"),
+ sparseProdCpp, "RcppEigen", incl)
R> data("KNex", package = "Matrix")
R> rr <- sparse1(KNex$mm, KNex$y)
R> stopifnot(all.equal(rr$At, t(KNex$mm)),
+ all.equal(rr$Aty, as.vector(crossprod(KNex$mm, KNex$y))))
\end{CodeInput}
%
Sparse Cholesky decompositions are provided by the \code{SimplicialLLT} and \code{SimplicialLDLT} classes in the \pkg{RcppEigen} package for \proglang{R}. These are subclasses of the templated \code{SimplicialCholesky} class. A sample usage is shown in Figure~\ref{fig:spLS}.
%
\begin{figure}[t!]
\hrule \smallskip \noindent \ttfamily \hlstd{}\hlkwc{typedef\ }\hlstd{Eigen}\hlopt{::}\hlstd{MappedSparseMatrix}\hlopt{$<$}\hlstd{}\hlkwb{double}\hlstd{}\hlopt{$>$}\hlstd{\ \ }\hlopt{}\hlstd{MSpMat}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwc{typedef\ }\hlstd{Eigen}\hlopt{::}\hlstd{SparseMatrix}\hlopt{$<$}\hlstd{}\hlkwb{double}\hlstd{}\hlopt{$>$}\hlstd{\ \ \ \ \ \ \ \ \ }\hlopt{}\hlstd{SpMat}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwc{typedef\ }\hlstd{Eigen}\hlopt{::}\hlstd{SimplicialLDLT}\hlopt{$<$}\hlstd{SpMat}\hlopt{$>$}\hlstd{\ \ \ \ \ \ \ }\hlopt{}\hlstd{SpChol}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hspace*{\fill}\\ \hlkwb{const\ }\hlstd{SpMat}\hlstd{\ \ \ \ \ \ }\hlstd{}\hlkwd{At}\hlstd{}\hlopt{(}\hlstd{as}\hlopt{$<$}\hlstd{MSpMat}\hlopt{$>$(}\hlstd{AA}\hlopt{).}\hlstd{}\hlkwd{adjoint}\hlstd{}\hlopt{());}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{VectorXd}\hlstd{\ \ }\hlstd{}\hlkwd{Aty}\hlstd{}\hlopt{(}\hlstd{At\ }\hlopt{{*}\ }\hlstd{as}\hlopt{$<$}\hlstd{MapVecd}\hlopt{$>$(}\hlstd{yy}\hlopt{));}\hspace*{\fill}\\ \hlstd{}\hlkwb{const\ }\hlstd{SpChol}\hlstd{\ \ \ \ \ }\hlstd{}\hlkwd{Ch}\hlstd{}\hlopt{(}\hlstd{At\ }\hlopt{{*}\ }\hlstd{At}\hlopt{.}\hlstd{}\hlkwd{adjoint}\hlstd{}\hlopt{());}\hspace*{\fill}\\ \hlstd{}\hlkwa{if\ }\hlstd{}\hlopt{(}\hlstd{Ch}\hlopt{.}\hlstd{}\hlkwd{info}\hlstd{}\hlopt{()\ !=\ }\hlstd{Eigen}\hlopt{::}\hlstd{Success}\hlopt{)\ }\hlstd{}\hlkwa{return\ }\hlstd{R\textunderscore NilValue}\hlopt{;}\hspace*{\fill}\\ \hlstd{}\hlkwa{return\ }\hlstd{List}\hlopt{::}\hlstd{}\hlkwd{create}\hlstd{}\hlopt{(}\hlstd{}\hlkwd{Named}\hlstd{}\hlopt{(}\hlstd{}\hlstr{"betahat"}\hlstd{}\hlopt{)}\hlstd{\ \ }\hlopt{=\ }\hlstd{Ch}\hlopt{.}\hlstd{}\hlkwd{solve}\hlstd{}\hlopt{(}\hlstd{Aty}\hlopt{),}\hspace*{\fill}\\ \hlstd{}\hlstd{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }\hlstd{}\hlkwd{Named}\hlstd{}\hlopt{(}\hlstd{}\hlstr{"perm"}\hlstd{}\hlopt{)}\hlstd{\ \ \ \ \ }\hlopt{=\ }\hlstd{Ch}\hlopt{.}\hlstd{}\hlkwd{permutationP}\hlstd{}\hlopt{().}\hlstd{}\hlkwd{indices}\hlstd{}\hlopt{());}\hlstd{}\hspace*{\fill}\\ \mbox{} \normalfont \normalsize \hrule \caption{\code{sparseLSCpp}: Solving a sparse least squares problem. \label{fig:spLS}} \end{figure} % \begin{CodeInput} R> sparse2 <- cxxfunction(signature(AA = "dgCMatrix", yy = "numeric"), + sparseLSCpp, "RcppEigen", incl) R> str(rr <- sparse2(KNex$mm, KNex$y)) \end{CodeInput} \begin{CodeOutput} List of 2 $ betahat: num [1:712] 823 340 473 349 188 ... $ perm : int [1:712] 572 410 414 580 420 425 417 426 431 445 ... \end{CodeOutput} \begin{CodeInput} R> res <- as.vector(solve(Ch <- Cholesky(crossprod(KNex$mm)), + crossprod(KNex$mm, KNex$y))) R> stopifnot(all.equal(rr$betahat, res)) R> all(rr$perm == Ch@perm) \end{CodeInput} \begin{CodeOutput} [1] FALSE \end{CodeOutput} The fill-reducing permutations are different. \section{Summary} This paper introduced the \pkg{RcppEigen} package which provides high-level linear algebra computations as an extension to the \proglang{R} system. \pkg{RcppEigen} is based on the modern \proglang{C++} library \pkg{Eigen} which combines extended functionality with excellent performance, and utilizes \pkg{Rcpp} to interface \proglang{R} with \proglang{C++}. Several illustrations covered common matrix operations and several approaches to solving a least squares problem---including an extended discussion of rank-revealing approaches. Sparse matrix computations were also discussed, and a short example provided an empirical demonstration of the excellent run-time performance of the \pkg{RcppEigen} package. 
\pagebreak

\bibliography{\Sexpr{Rcpp:::bib()}}

\end{document}

%%% Local Variables:
%%% mode: latex
%%% TeX-master: t
%%% End:
RcppEigen/inst/doc/code.R0000644000176200001440000001650012253717461014717 0ustar liggesuserslibrary(inline)
library(RcppEigen)

incl <- '
using   Eigen::LLT;
using   Eigen::Lower;
using   Eigen::Map;
using   Eigen::MatrixXd;
using   Eigen::MatrixXi;
using   Eigen::Upper;
using   Eigen::VectorXd;
typedef Map<MatrixXd>  MapMatd;
typedef Map<MatrixXi>  MapMati;
typedef Map<VectorXd>  MapVecd;
inline MatrixXd AtA(const MatrixXd& A) {
    int n(A.cols());
    return MatrixXd(n, n).setZero().selfadjointView<Lower>()
                         .rankUpdate(A.adjoint());
}
inline MatrixXd AAt(const MatrixXd& A) {
    int m(A.rows());
    return MatrixXd(m, m).setZero().selfadjointView<Lower>()
                         .rankUpdate(A);
}
'

## section 3.1
(A <- matrix(1:6, ncol=2))
str(A)

transCpp <- '
using Eigen::Map;
using Eigen::MatrixXi;
                     // Map the integer matrix AA from R
const Map<MatrixXi>  A(as<Map<MatrixXi> >(AA));
                     // evaluate and return the transpose of A
const MatrixXi      At(A.transpose());
return wrap(At);
'
ftrans <- cxxfunction(signature(AA="matrix"), transCpp, plugin="RcppEigen")
(At <- ftrans(A))
stopifnot(all.equal(At, t(A)))

## section 3.2
prodCpp <- '
typedef Eigen::Map<Eigen::MatrixXi>  MapMati;
const MapMati   B(as<MapMati>(BB));
const MapMati   C(as<MapMati>(CC));
return List::create(Named("B %*% C")         = B * C,
                    Named("crossprod(B, C)") = B.adjoint() * C);
'
fprod <- cxxfunction(signature(BB = "matrix", CC = "matrix"), prodCpp, "RcppEigen")
B <- matrix(1:4, ncol=2)
C <- matrix(6:1, nrow=2)
str(fp <- fprod(B, C))
stopifnot(all.equal(fp[[1]], B %*% C),
          all.equal(fp[[2]], crossprod(B, C)))

## section 3.3
crossprodCpp <- '
using Eigen::Map;
using Eigen::MatrixXi;
using Eigen::Lower;

const Map<MatrixXi> A(as<Map<MatrixXi> >(AA));
const int           m(A.rows()), n(A.cols());
MatrixXi AtA(MatrixXi(n, n).setZero().
             selfadjointView<Lower>().rankUpdate(A.adjoint()));
MatrixXi AAt(MatrixXi(m, m).setZero().
             selfadjointView<Lower>().rankUpdate(A));

return List::create(Named("crossprod(A)")  = AtA,
                    Named("tcrossprod(A)") = AAt);
'
fcprd <- cxxfunction(signature(AA = "matrix"), crossprodCpp, "RcppEigen")
str(crp <- fcprd(A))
stopifnot(all.equal(crp[[1]], crossprod(A)),
          all.equal(crp[[2]], tcrossprod(A)))
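## Added sketch (not part of the original vignette script): a direct check of
## the AtA() and AAt() helpers defined in `incl` above.  Assuming the RcppEigen
## plugin and `incl` compile as in the other examples, AtA(X) should reproduce
## crossprod(X) and AAt(X) should reproduce tcrossprod(X) for a double matrix.
ataCpp <- '
const MapMatd  X(as<MapMatd>(XX));
return List::create(Named("AtA") = AtA(X),
                    Named("AAt") = AAt(X));
'
fAtAAAt <- cxxfunction(signature(XX = "matrix"), ataCpp, "RcppEigen", incl)
X <- matrix(rnorm(12), ncol = 3)
stopifnot(all.equal(fAtAAAt(X)$AtA, crossprod(X)),
          all.equal(fAtAAAt(X)$AAt, tcrossprod(X)))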
## section 3.4
storage.mode(A) <- "double"
cholCpp <- '
const LLT<MatrixXd>  llt(AtA(as<MapMatd>(AA)));

return List::create(Named("L") = MatrixXd(llt.matrixL()),
                    Named("R") = MatrixXd(llt.matrixU()));
'
fchol <- cxxfunction(signature(AA = "matrix"), cholCpp, "RcppEigen", incl)
(ll <- fchol(A))
stopifnot(all.equal(ll[[2]], chol(crossprod(A))))

# section 3.5
cholDetCpp <- '
const MatrixXd   ata(AtA(as<MapMatd>(AA)));
const double    detL(MatrixXd(ata.llt().matrixL()).diagonal().prod());
const VectorXd  Dvec(ata.ldlt().vectorD());

return List::create(Named("d1") = detL * detL,
                    Named("d2") = Dvec.prod(),
                    Named("ld") = Dvec.array().log().sum());
'
fdet <- cxxfunction(signature(AA = "matrix"), cholDetCpp, "RcppEigen", incl)
unlist(ll <- fdet(A))

## section 4.1
lltLSCpp <- '
const MapMatd         X(as<MapMatd>(XX));
const MapVecd         y(as<MapVecd>(yy));
const int             n(X.rows()), p(X.cols());
const LLT<MatrixXd> llt(AtA(X));
const VectorXd  betahat(llt.solve(X.adjoint() * y));
const VectorXd   fitted(X * betahat);
const VectorXd    resid(y - fitted);
const int            df(n - p);
const double        ssq(resid.squaredNorm() / double(df));
const MatrixXd     vcov(ssq * llt.solve(MatrixXd::Identity(p, p)));
return List::create(Named("coefficients")  = betahat,
                    Named("fitted.values") = fitted,
                    Named("residuals")     = resid,
                    Named("s")             = sqrt(ssq),
                    Named("df.residual")   = df,
                    Named("rank")          = p,
                    Named("vcov")          = vcov);
'
lltLS <- cxxfunction(signature(XX = "matrix", yy = "numeric"), lltLSCpp,
                     "RcppEigen", incl)
data(trees, package="datasets")
str(lltFit <- with(trees, lltLS(cbind(1, log(Girth)), log(Volume))))
str(lmFit  <- with(trees, lm.fit(cbind(1, log(Girth)), log(Volume))))
for (nm in c("coefficients", "residuals", "fitted.values", "rank"))
    stopifnot(all.equal(lltFit[[nm]], unname(lmFit[[nm]])))
stopifnot(all.equal(unname(vcov(lm(log(Volume) ~ log(Girth), trees))),
                    lltFit$vcov))

## section 4.3
dd <- data.frame(f1 = gl(4, 6, labels = LETTERS[1:4]),
                 f2 = gl(3, 2, labels = letters[1:3]))[-(7:8), ]
xtabs(~ f2 + f1, dd)                # one missing cell
mm <- model.matrix(~ f1 * f2, dd)
kappa(mm)      # large condition number, indicating rank deficiency
rcond(mm)      # alternative evaluation, the reciprocal condition number
(c(rank=qr(mm)$rank, p=ncol(mm)))   # rank as computed in R's qr function
set.seed(1)
dd$y <- mm %*% seq_len(ncol(mm)) + rnorm(nrow(mm), sd = 0.1)
# lm detects the rank deficiency
fm1 <- lm(y ~ f1 * f2, dd)
writeLines(capture.output(print(summary(fm1), signif.stars=FALSE))[9:22])

## section 4.6
print(summary(fmPQR <- fastLm(y ~ f1 * f2, dd)), signif.stars=FALSE)
all.equal(coef(fm1), coef(fmPQR))
all.equal(unname(fitted(fm1)), fitted(fmPQR))
all.equal(unname(residuals(fm1)), residuals(fmPQR))
print(summary(fmSVD <- fastLm(y ~ f1 * f2, dd, method=4L)), signif.stars=FALSE)
all.equal(coef(fm1), coef(fmSVD))
all.equal(unname(fitted(fm1)), fitted(fmSVD))
all.equal(unname(residuals(fm1)), residuals(fmSVD))
fmVLV <- fastLm(y ~ f1 * f2, dd, method=5L)
all.equal(coef(fmSVD), coef(fmVLV))

## section 5
badtransCpp <- '
const MapMati  A(as<MapMati>(AA));
return wrap(A.transpose());
'
Ai <- matrix(1:6, ncol=2L)
ftrans2 <- cxxfunction(signature(AA = "matrix"), badtransCpp, "RcppEigen", incl)
(At <- ftrans2(Ai))
all.equal(At, t(Ai))
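## Added contrast (not in the original script): ftrans() from section 3.1 first
## evaluates the transpose into a MatrixXi and then wraps that object, whereas
## badtransCpp above applies wrap() directly to the transpose expression.
## Re-running ftrans() on the same integer matrix gives the expected transpose.
stopifnot(all.equal(ftrans(Ai), t(Ai)))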
## section 6
sparseProdCpp <- '
using Eigen::MappedSparseMatrix;
using Eigen::SparseMatrix;

const MappedSparseMatrix<double>  A(as<MappedSparseMatrix<double> >(AA));
const MapVecd                     y(as<MapVecd>(yy));
const SparseMatrix<double>       At(A.adjoint());
return List::create(Named("At")  = At,
                    Named("Aty") = At * y);
'
sparse1 <- cxxfunction(signature(AA = "dgCMatrix", yy = "numeric"),
                       sparseProdCpp, "RcppEigen", incl)
data(KNex, package="Matrix")
rr <- sparse1(KNex$mm, KNex$y)
stopifnot(all.equal(rr$At,  t(KNex$mm)),
          all.equal(rr$Aty, as.vector(crossprod(KNex$mm, KNex$y))))

sparseLSCpp <- '
typedef Eigen::MappedSparseMatrix<double>  MSpMat;
typedef Eigen::SparseMatrix<double>         SpMat;
typedef Eigen::SimplicialLDLT<SpMat>       SpChol;

const SpMat      At(as<MSpMat>(AA).adjoint());
const VectorXd  Aty(At * as<MapVecd>(yy));
const SpChol     Ch(At * At.adjoint());
if (Ch.info() != Eigen::Success) return R_NilValue;
return List::create(Named("betahat") = Ch.solve(Aty),
                    Named("perm")    = Ch.permutationP().indices());
'
sparse2 <- cxxfunction(signature(AA = "dgCMatrix", yy = "numeric"),
                       sparseLSCpp, "RcppEigen", incl)
str(rr <- sparse2(KNex$mm, KNex$y))
res <- as.vector(solve(Ch <- Cholesky(crossprod(KNex$mm)),
                       crossprod(KNex$mm, KNex$y)))
stopifnot(all.equal(rr$betahat, res))
all(rr$perm == Ch@perm)  # fill-reducing permutations are different
RcppEigen/inst/doc/RcppEigen-Introduction.pdf0000644000176200001440000136660212647727164020724 0ustar liggesusers
#L4ϼ  2Aj2 Jg+e: >3TpZDb`AA!s:[PdH׵.gT;ﯕj~OEz3jo rwEyݩʾ/i7x'Z_3ce\E7.`H{@$hQ!Yӎg¼75تn|/4A+*|l3lhY9}m0=6 2IWtތ ,^;sgXۍFVs}#7b UtY,'yҩ#P1&ɻ;;-EDm4[Tݤ%2]..q?:*8*>O|![23~\jC4|>@'ebsqPT ]Q09?z7kt.@[$$lN䬦^(Srlyה՝XtpN^sܼUõcDlpbq%s7{{b>Ӈ_Zu$ﯤ$.ndPT-3]kOysG]~xzAH{q틠+"v3OjPX[J\}&{óƃ(GThg7*ؿ^7kt#yys^:DiiZ&K+ʭۺ9t7Ztzn&P(g3S#+W6OJH1EriZ3Ar̂*"É z.$jcֈ]e_6+3b6c'rh;'^D[/G8T.ζ AvCVm mu)1|r juW4!mF.5!`Ex쁘$Y`~/4>sf^^|]֔MX+TZbe$sAk]D񵨪Ow%(No٢g~+xk8d !fK 5_ 8V)- ˁW cUQƥndeIo(']X|j-s%PHаI)ex{:y㔋[]Eu{e鞐g (aB?y 9W͉RI&MFxq7ȻeCZLȹ3?y;{K(%Rzc$q|/#sS@t7dnn6gE8m5;Wi|QU6ʿ6R8$kޞ-L iU?DA/Psˢ-r׫hPq#)p X̵fj'jSZƦߢMe(EZsZVtMnFG-ϯ|6b6פ`+73rzn\-EC}HCwv`S^:)F+BlohlUׂ*J28E77_ګ#A "׉<OH-)xb:ʌ޽h_%pPY D.'RtMg" 6ѐ?O.*uE7ҍ~ǯiz_W^c[6ݷ@B0$fԋ4!hîڐՉ=bxp*}:h*?*soɋT3 !Z*hAg˔e7Pj6܅bv&F|3ł$bƢXbyۚ$Lo_+0;"N- &qX *XJ M㫂jQSeRQᾺu.:^Fq{BLBTۦt#7@hX@`D܃6b,Xj>o8[ԑÔ{mM'퇉 MQ9ݭ2n)&pPaO+Z{J--Q*&qt>B]\p/ X1V$y*7|Y@W,eSHꊩy;?Z14ѵgbJNi#CNy*5xySd7Џ!4iOլmi4aXjT5!abՒX+-ׂDFj ;xc#!OhHvke\߆}|6(m]&J|'+X7ug%?N8~^Uq1^aGHupk~L!hɏ^g-~z'rY|ӰyqX(Q>ֲ8d>)B%3*g}Π目3 .xW8` XyASfwqB{[Cg}"&[$qgʷ՛vENx ;"b3#vo"֮YN_rU:$>Pl="bU( c½I _Joz&F}&1 ,JZHC9 o#uBK/}Ҡ#Fk":XGHVS|rli?(ඹ51KlHS_%8FVJOp2Q}Q7p$d7GVc"Yê=*[׌b=5<-IBIoZGbHmWx{|P=S8>PWqOu:bnsoB הxˢ~3@cJrhSEl31+O"#rO̾4M#|_Td [1=I}!{8ei@6%tp{A4̞i:1:Do+g6{^)wg WqR7IUÌG&u0zخx ڟȷ;mÀxo4DSDFˣ{˦8zSΆnO2j5"Y>Eˋyws=& |B%2ꑟɘ]f픖3ܗ8 ̒ho+H1]6LpR!U|8[445v1n61+ )UE"~Ve^xf5L}LU7R~&αڃ2}!^"OzƔVmA7P~+HkiXJx+L cM77:柷ȩLuÀswvKQ3W%=!1#UkbB/řir%00Qk(+IS|-6ObN!w14fBm!r2]\_۾^W޳3?Phe\EK;j,m;pjiSZ1HҾ-VkT"h$Q;^yVals>li-3@Ԟle$[PrW痕HV(4WȲ ;JO+ZP ]KCZၰq ~X Za9K=nM|?,sei 0oYYK :sHhe;܈S/!UoQ̡=Sݰu bLh"y7(0A=J}PђMGK<ԂWAy1Ϛ W%Hg豙b=v̬{P6*l^+eM ^o䁕-Y> stream xڍt4$jD;3 `F{/щAHB^ zщssZYoiqKZ!,r8H* @~ Yru3B]0\]'Aq8@E@E@(/ E qYTyJ8,ptآf ?t,!p*e uDh qh!,aP?J٢PN"<G$);hBP75#d<m[_ ar@h G3\VPp @ Vr'C,-N' n9@r*<(8 |@~wIj t9;O X,Q h7^ `DsM/+?5PT73cRR77 @ 1eA`'WnEӿZv`M[(?,7SoUnHwwCahֺ PEuojUV0W* h%Hml aH9 ei3:C!_t_1,$B$ZmkeCzgpK/'>zh jMm/B3.#_&^?NtuqA,e>ğBX7UKRsK?Thz%;f5\&7 3kQr>Y>^}T)pHnEɭ$S)wzRt"%.]hZĭl t$<({ҤZ]FX&E:kc\jqDZX9~{ROS(XVFFJw*[+kEC.bjREvuJ$LhԻJ-2kOrh-V$b 2@dĝ=bOrWt4θpMdoF YwXi1)~\}Ƭ94q< s['ejgʍ9B2Be. V2T2L&қ N7'L:OG'fӊٲbN\o7{4-i&g[[YOex:Ō?ȃ*. 6S Mz)ߴM(E;?q r>Bp+fś]"0֙{mKz5`3beI N{V /d]ZH`:2b[EfaTJ"YLDEȰ 䌾}1Y|s?S껻{ Pl;fsղMև݄ψ5Pέ O-L";χ˝\\! 48>ƁQtUO_R SNTXTV҈8w۳ny똷Dʮ:̘sd~` a)H:NbS\f wϻLrP uXUu9t[Uޭis)Kl:fjXT߬Y~Mj^3h>= WO0`(  Űb .6VlH0s[I379ĥSqa+ U{ccipB.̺VCJyݩ닐լj[& 8m.nV39}/ǵ8lJ_'ƥ'k=؄f˖)b=G^{[Sv ^!z9Wk1Z-p P5`oꈲ8TRѾMe!6H`Sb u.R(Tnb$HY%"~U/?GC^nm^ ֌ۍ~?9r>Re*cC,jAiMS_nhn._]mSD$ *OH} 2^V,2.YXCO|Cl*~%Xm\Qbٙ(Vi 'F̞-)E($tq+/bl+$(!_*9veɱֺ A ϖkKU*7ZEփ,jg[2/)U΅#n=QLNɵc)u|/vS˘"/2AC /Yd]MԊ4X"Z" fn9H0 rKKH;KtElJ\ u_!9[")w}L<4Q/^"U>m^.H#c$;7?6.%Xq-`cp6(;C__%FIMF ¶\d ĨzKL(N)W&}3$0HÓ H:@DZ2ݿkQkyq3:O͚sGV肵z-wˍ9HQ숈C3F3u[pfIdtQ`,S,o: \ 8]o:Q;fV}fM59F'4gʞ)XQ~);EZm-[4!7FXp\p3+> Lca=TN\FՔ)>ñL#X&q,f *>^,&Tlb ʰpV}ޕ)x؜4#m[ *21L%mudfVճƷY, okpތL:;|BHٓ۹mWp-7 b (;wmd_}EnSrHyD`JO`|5;j8qRGqIƧz2k'Mp{Ҕ) Dˏ iȂY9$ɠ .ǻo'eze? 
PFKSD8J6"%43R>:2x X[-8 9RHPN{!6cݘ7}rQ9V\T^{qh3uL L"<]ZFu4{{Н տ f cCҸ-TCⒾ%*3˻d?jݏO]yh3`!eX!E!w&u+y\9΂PK?G~l$`IyqKVEYq_6Ƀ tifku9f8o'hׯ,t 2~JV<~b[͸11|mۣ%nNa%uFSL BvA{5Ynqc`^@X !aOxy9SfZh'a9z'PKbD?oqw[dkX$<~#W-})̂gv_t0Ťߓ岌.ýDe`U wAm5^ϝtFjuzdD)i^gܱ ~MZ1YO_(Lk-<@Z0b 듴0Ru"`݁+p.MEhW Xv甇DPIauB^<&}2gu/ wÈF;3Q̻Gtd;I?<)\:zޱ5ōX"%͵kD&r24Ol޹rުeAzTɅi*zP+skQWs( =Y=?r3~d=g*mdnڿVWRr35++9"CajG^'qh>LOH,Ln$rle^MkcS,OZo|[fZ|m2+*ӍT1xAPnJ'Q-LRMS񑼲7r+wzG0=!J!6yVпlT Uߍ*^)lG:8xAѾHzPrFDJ!-̲TL<VD&EDO'Z-y+Ո"Cu?;!߻URA!ÍHg]V8o)f:[+u+!&~7RPo]Q_9>P:N.:ZMJ-HaB1lͥe%7꼽"-daR SV5cV^1T[ ďHMO"QWIk`wdNwϑsO7w<7̶cnc}X:,vڕsl7wc :Pv`bclk:DrF!a endstream endobj 448 0 obj << /Length1 1530 /Length2 9293 /Length3 0 /Length 10303 /Filter /FlateDecode >> stream xڍwPo.],%Ͳ4HHwwR]RR(%ҍHJw#%?;gٙu?םufQ[G p44TdA|>>^>>~VV};w8VCB?(rPMpBT=$,Mtrȃ=U' U!ߏv$&&;@j#`w[CFsA}+8 vturxٹtnPWO5wM#xqXvn9ܽPn"|<PWCz:@'t ^?3qrt#|6 RTuvֿ`Ӄ?l[= PZA7wa欀srt"p~'o < : ~@kg"@]Po-w }g.Fv0 pw#`mqXAm8F0Ca yd 2k'_ Tҕ1UNee~< !<<w m_ އA]_2`kI8KAPb7<|%M鿣KR ?`G;_z?B 7:;a!d6 isSZk۹Cl͟vq!1a?;d{`WW%> !a㬡E8?J\q~ߘ('eE?HDT=0A|@C>CC> &ZC?M0?40( r< kY=>2ZS S}̴LF̯%]c >Eqt*ծZ{s ano]'3$7_b⡌`R ࡄdV|hSaq;y:@+g_hwn9fgY4cGV017ֵNxӖ`v+!2>TyrMW {|7؆;F}j'~"QA%*&ӁCEC0W؝FB)%\oN5&ڬl(+u))Q^P I?3n˯A.bPg]( .^GL\<-ԭ(LT,$3}%%wplB%Jhڧ%o»2xwЩ5/op]d@ mCHyIf lz"sH8p4%LB^d>2YTō^&B$w|9iu$j€AVGKixtmv{!louDѫ /3,U:jҰ٠Ee۱_i: :Vy: Hȯ탡죙g)7-LYy>N)i(#S& ?̓]x?K i#֟o"!1[^CkOP=0;S`sC}mݘ[/&Յ,EoB^Ȕ%#BFB 9 HD|φ ,P&β\}3o_5.i,)ޜFt4=%= =9"rAt wEǞy*]=)}pZ8+Ib6g:o: Wx4eODk71sr}3Gl#rZ3e]IQjҒy.;MJQ7v6r;7Ѱ-(QH[47xB_l=yBJ+*PpKvC[_0ua\䖮kZbSJ~v)LAM'Ay|`67܃GkVb'7@=gaĕou:ޓEܩc~2ƃ}txq(}S?1\( ~;y9ӢB5g7Uh!hҥZ@G"mO(U>A~s?}1\1Rf.d$HqB*ܑH^8A.W7Rʐq ] +Ae}e" CqlliRGLgI>Ad'e_%“ ?4F;T2;#mvn8`#70T7"ACDQ%Gm|Z@z'SF.'O~[y#29 Z {T*s-9RrӟCB)݅%R#EA^cMx`s0*4YNKDK'rPVsG/:qwyW.Ӕ}Nmʁ<)mfѲRjGN|=/Oϰ"v6^PDe9D_h$Rr}\R~W5uA ,׿?|1?吖NrG(14΅B6y2-ir`o0#UI GԞũD+rۓY5?.ڢf DТgҽoc1$O,I*LN7ZG92K#LעufeK'`'t\XuBRwH`+ ߟv0P:_gW*?ci'Qߘ]nDK5Syڋ1Oܷ2.iB9d|Z6?xk_|YA0hkJX]A3P nO(vf?i׾uݫcU*Z 8L@|v XS;}|]EG,cSSܯdǍ Fs2Dv4,i:Pt2Y RploU2j}zͦ,Xh3#b{:(s=gbaU[~;9nL̽?}V.5lY+Wͫ1D@%֤'MV4 £!E9ؓf$&Q#7_וZHᥓXNXumooV_*7F-$S-=Q2 C) f+i-ǒdulx lE,bvи}pQ;UPO&)u@ŦFeŶFs'Y2E[@ЇXJN`comC#U{$ Yݻ?^NAYQЕjɄAq#!11oj<4W7;_J{1JF(4Wm85cL2&/vy<<\ls9un}ڙ=j"50q0PFN=TA\nA2LO,iiݍdfapvQ"v|3{E 2vod ssfM5FEn 5 =:DJBVL@&ȼqk7+[ z=uw.ΠRLM)ӈE-BloJMtMxfJKwJ8[C9$Cߤ` 9H*rbgpt*=]!jIs rK xBQ $Bq3UcM7{27s]N$hj{wצQ_*7~{؁:ԺGL3i\jbE˨"Kh?8/m4l`Y$t{SX{:;T^(jȼ$m7%d0~L* ̛ FցrTؔ؞$ L'?fD3[mX4@9L-O`qDM7bTi:0T94DSJBY.3r* k~||X>8A]˛hGz~'&sE]p]uaH 0W g$J>(n5xB(h}C)s>*YyAi:΋'Qp yR>6Gw5)Kމs.EґNG'ZȟV:Z8&(k*ToU0!]ܭ'VTzN}g?Y;#`ٛɺ[`~dKQmd');RΎP*&mR4f[1 !♉~ȶ%0Nް3f trFe?(^svc ${tCN%SE7#7j{b]w.G ߇MGlYD꼼I-X!@BW SKܒ{׫ow{ "wN.f>zN #DGkgͺk^B%/ܹ(QU7̣%P]7B(R| jQ;ɈP{43_7qTqhe~%3mj`izc!= Qh)V5"M;~~=^DZoENj>i%dhg^ы7^,hUЉ̪n. 
b,r#zsl}Of5`TߊpN$G4 ϕրD˯,6_"3(68Cl !g;'ʿ] u8`i '(`L; >-hpnRd?xW2,ZqUT2Š&j-\hN )ܝ̐7 ~:ǿ e91S 7It1"\Z&;Qiu@=З僐%(Hnn6#hXmo87:GD] #[tSOˤ14'۵{ɨh^*떽qt_/ˏ~E8r TDDO׏) *6;$k &jua9t1]Jf; 8Sdž77kVhgNgF[rbOV35!/$glpamN:2Uaә6X٣Ԋ7Yf=ݮw 8xtjK- F[SN<,'A̱NAE2#EKǟ=" F% GbwgPq\@n6hqI+a|JxH3Uw~:qhtXrΣK۳sgsl>kM](~+rGb~c2f}${/޷'Ԯ/Hl/łd_vz~>khL!\N'jjsDN`\s0y@u ×D SF.NMN×1VǢiB8),V|CtI(?᡺ezQ=}S(ܳvMcc^Rc Rn)btxCL9# jsg0rTэZ۹@Sg'mͤEh/KQV@Oܘ 9=}vNɁQ?`0OS+C3x}-IYvK4(Tg0<ׯUy-:nh#o峉u|gL K,ΏfL|ˇJ~21G|[y-Ħze3]&myd;~ag4|o|,Y=tjsynaF+m+Qbr8)'cZY{DS6|1*xr 8nۗM[*?f Ќ>q)cUُVJݭ9d $8`[,e:P/R>ز-7DeiYsg b*[L,w;nhV}K p}WfioR p4!):m.;TSE\#w#!.>ڄżUwOG޳wD"o&1J^%X{мF tv;bI6ېkws,(h23G-295Ϡ+ڤ,kDRrB)cHw̈ٔg)2Q^,rWhߪ{ E\1˗ EJ4Q&pg%CxW0ԓ̯־^}~`뛽i=T#RZ{sm%ַs_t4d2mv1NU6DLg $vJ"ZBpFת=:%~VNdBƔּpAD)XLVWx޺̓RI݉~Ghs3P>A*ͼ-P~HJf>2D3rmvENy',@Bu ~aWqu%Vº鶘KW#rL*M$hK[d{jR%,VΞ]=NDHB~7߾(k rn* S7C̮323'⬓U?RW/-Mc EӶ;+&{fEÄ+uc]we "xe!ۆXkEճL^hO:8h\^aP2;DF<>$ #X~V辑c}a+;nPS:\i̋򉚀+#}W:T$sE"-*~eEU[y#& t(jNm~ɷ)'ڂj+WEMAUnnD=yBˆWW5ctŊ s6^\N;G/h[֏hx5 7m^V!)k;!T97+dKi ߗ:o?ų9XDŽKgyw)26:BErCl>l։~:뀝"#ݸjAn0ʙ[D\vFgMZpɸ{sTNa{oAW@̀ shyBN>7):c̕k,v&K`DKޯ UP;oB잛Yԟ M_etNGsDx IǑ o{l[͗6=+V3Ϙ(K(.N^s#Pϴ2To5ͨ46OVh[ J>}g3 @C=\>ACbA, #J;fh \gޯNmedo rc R>U0ETb8S~Ѝ:OtȈAP<fL񍀏5ɜ{G[n=&s8E~%wܽJn'b18@}wa Cb2'zCF Qpˁm12H~x^6/oG{M@],=d9HF#S6H@7W>o59ENL&zEI]iWpYMˆfp㘎% 6ѠI8{ xfDCR[[QF3:;wƽtCOBZunT}KTޑ+O2M9Nx+gq*%I}R"yl..W+z/G:0[]tIבxy'~&YU%Cc cF|F2Nl`D(+Yi`UDOf[r#ěIzQN`!+ ,-ZX O7+Ѳa,Vh,=j128~g{3Vs ٰlo-oun3@o܍7`ހ-fu#OGvQ"%I"Җa\r g~^9Fѳ4I;Q^y4{;זV܅X_y_I;t=V9nG\]o> stream xڍTTkAPnfhPRPRA$$E)%EPBFqsϹw{׬ͷ~v|paF J(v?aw Zp2cVE+H |*O8 {~".Ae9 ?װr1A"<*0X>G8 /Igc?w CP;]pcHPo8 Z $ձno{_] ~ ?oVX٣~]XOMSH (_@H $*DA)K?Ct@@bꯙB,R@ +*'=W7ġnW?,u0X蠰b@j ]01 +%#B qa Aoԛ+ G?04 _1`.دK!(8̯{iñ HD%$POO) U="Hp@yg(@2#miH'?`k[X|ߋStF.2{/*+1cf]Ōb=#0YB`7/8D#]joC;2l7+1 z,箪hAMT$q~A G\T<ܥG0+︒'r+Y]_1uZ)ZSL'M7}Q 4ج4>߮z-(QMˆ^p|d2U)(O%oQ,:9kYS$L2{^UU>RSfl*r%e:D}]e5>$}VӃQ v޵ѐ=f܏>?Wq=3MRM eos.oIePՏ<=q#mHgl\$hѝkP'23qKmt1$LY@!12.3.|ҋ&_~3[Ʋ]ZDIWy?6_6]%]܈@R Mf# m]X eE1~@=(Bђz܊;"fVK}vj>y򣫠 { ѴiȨA|j/c}LA{[Տb^$03K#֢y/UG_MB<3lqe^~ž^껭NjSZоN2SunizTzy\yȣВ?􀔓pėJ>UFZ9v$lW*b(\/Ϯ^0)Uo.沦==<` ,=R>r=uk$_R'ڑɶAO^R;nI#ZU޸m&TxH Xۗ* dDL6ߎk&Zk||"ӗ9jKt|4aL&5O MeF\h{REѺS>c岙dI=a?1S =RH%i!2xTCJF k*Y>OSaF~͒A+EIDz2sabhys|ھ6܏h sFɣ9hKyjnFYW@hǩ9EjaH|cGTD)_z7Y"{d_T)<d++,K]/đwb!pd#6usTLJ,hsY7t" bv%ܴ~ZnrDJ* &[OmGtFyH% {O8d /k[+/~c=]%9R$~yuŤޭ׫ǿNwR0|Ε>c~|wa| mRKx',iP:bZ l 8:wDŽE+b/_FF#+ ϊB=,PiFYZKWR:1O辥mhf*Iv]ܤ[9D/5lS;EVUMÍ"wkGD k..m6cyR%0۔8CMD q0{&Ys2z#LSnjszf}IF Su55Wf\'溁ףQ¡r2ͫ6(v6Z+ƀˁesF6c21?7Sv'(ѹXj:u;2*[]~k_ E9zD|\@$3ʈp* 44~LHY{0[M}#{fQ8VI|Ƶz/Ƃc 2ewYcՙ^}̦4Ko~lFNec9A!]ILXĻc,QOG! w%FރxǾpՄ&KvUY!m>#<%ӕ c-yѫu*@"d IQhಕtqqvw']QSgBnS >g}[-40TjkS+|p2%Ӽ/NpY€tjYB)s[ɍ gx a;~ &Һ.j~yt/L+륅~g#77*ou7w ϸh5ȇ<Hўy\`o7m}8{YEnhwzmu@K&1,3%}5oc%RƹVQQ+<\3Yﲄ?jcZ{-p3yJA4t?䉓u 'B1GTj59Xjdבd'ٚcnwh:]9 nbٱo/")38 vQd(#5s>uv4܈}e"e!\+*Aa=MAjvIzikuEljӖi:g҅+jp}Jī|]~aY|`@DC2Ӭ%IÎEpÝlKnį ?0wb-YgzJGY(pYބY~Pc.V9Ozd }ь1Iqg)cKxo]JՌUb:eî͆Ī0u՘=>4[9eh`}c\hIϝFR+StS߃ЏXF}hdJ(NSdL!]NPLA¦=3]%u$B>ƿd%{dt>߀u}KtSQIeBOW*󁋻ĚċDršBs(k~J~x[UKr$sSaEnXi/ 973[7ɘKa1qV{uC[%ʅ8y4-炓SI.CXM]P{(ZןЄ~񶹊vd{bS9ď^\&$ q>"GB̮ͽv[íBw늭hS8?X⪆1<^mi!m _5-n^6>1ˏ 0nO|-|7d l}fE|W.黇)<23C=Lf qwlHрsh 2Ӿs+`|XÁ[]o 78%ѕ&p.p@c endstream endobj 452 0 obj << /Length1 2685 /Length2 23536 /Length3 0 /Length 25041 /Filter /FlateDecode >> stream xڌp] Ǎmضضc 4jV1۶ݤywӞ;Iy֢$UVc5w4J::12TYY,,L,,lnv)5.֎|M@&n ;G`ca ཉ9@ twtvrO-_Q{@ hhfbPs4yO+77'>ffOOO&{W&GK!Z@ t~ P4S<%@ Y\A@(9@MFtX{`ebo{d/g33G{'okK$)0q0mhb7031 @RT`*幚X;2Z.wP%nvdm=| ,-~aĬ`yodaae@/3+սR*urtX[[Axn.@߿YYfnS 1 @{^v5_fYii-M]*NL `dd^2n lbo8X8xa j{{h}Z ϒp^Qߖt_ @K:G8_S-?G4vZ7!:X֮^@sek73+v*;Z~A?:iق M)`h8&..&!'t@-1 pt=Q.No? 
,q ?,_`XRYb0A@\ ?EqQ@\x@\ ?EqQ@\ ?Eq@\ "^4@fElfc 7s$@?6w/'? _߉&V@m2qA o#/3;i|,_i@d =e-󷹣_@AP?9@v:e h8APU)'2'tA;!rv5?jP0'khg48@@J?S5 F@=Voln@sSGy_15VPp9ws9='(+ߢ@=ud'-fkA}tt/_uy{Av!t'<]@#s+tCfKf65mD <g(Ri}\%Vgo܊& uH܈,7׽ oITi}{6: 8?Qx,ZGGȨ.d !K΃q+UW2q~OeK|1F#Z?d24k ƍ ev=gD3=I {&[ÜWu6N< <]\")*_dYҢEf"$UFTCjkO==F~&B 1v5b)aU}pn $:a!veaB;]Eұ:l}s^gr?8U"'LhJ'a—A]6ѩDeXHy6  {g.da%asbCFgoj(J?恷TR5@ta}РXHX/mE!֑ygP:wh^NW4)Ow;s=pԨy/F~O ֳ0=C:2CkW_NLE~!Hjմ)ZL=cq28 %FShDPXF._D6m%H$X_y#V!ߩN^8B'3WRδT@Azޮu5l҅)xy60zh;EhI-´a>Qc%_b(K䑆2}GF|EEf^u+冉O93y<7/=GxlYrғLKZ_[x ”q@@1+Klڛ@Y]bznFQ l}oCxWFzI?x8X爮&QҚ]Y؟5ӱ*kfoq_̈)7[Z98Pn>H_zCcOc [fn+1]@ zuSG~ߵQJvCMMżd$*%&Mk6y␓a9"dryOhYVue-NWƬ6%ύoEs9Lz^knYNLzjl_ne@|\+5k}diN^x:RkUcX9FRw[G4ĢvSƫ~N]+er㍤rhg}u+7!+U $ n1;9$Aa#$LW+Iofw; e,_]`bR}&ڲϑx͚J^5 Aw.Un1T<,8'_3X& 'g,8NaʩSTNPM(v~uޥ!$[F5b>U9ل=f`a ~^,h'Zz.ޓV3ˁH|s?GAp%%qmLbD/"^ً#P:,;:)yLT 2vkΥ-i} ={_fNBlܲb/W̐Q 4e^X* nBzUFw&pXurgMp;^u. 4*FIX@ PcC`MVXM%n1;)rqkh{VM+7ug9c4;zzKa05S#:I'?Hge`guJgr(͊$N2ŖFe `XW3-hVȣz*u\^M8 :ResļP1PeS8:`؟D86h3? !d4 Gqr ZMgZΛMZJ&8XNv ڕ0nhߑ1QJxnK8z8`eIh~/g2%nỘ6#-:N6PxbgH8)!*KFzÆj)wN#wKO(nѳ1Y+" ;[[ZQ=)</N7KsVzBʸ8[oY ^l:;FqA="zslNNr (ꆆxXπIbR^8b=7[v+֫.WfMi9ؠ y J&S\Ӭϊm xV|g}v"~P_OHW# NMЋǣPJdt{wA [_ʛ ;y*.m!56ŁNXS0'_$NSTOgrBRnӵ!r~CHRES0َdp1K%pm>SW?-Lvc$sz0ךʿNB<ҵA Z޾qktsJg,\vM:k2S5~] %$Lr ^I;gCVr*7pWd+}`QP96DT-qR:ssNvEsYfjzIPb?}]&Y1oˣF _]-|J>c@8l$<+WMC2[p>_}Bߗ 3rElhw"4hOD "X@$|Q燎"i!ۈ,yrg8IS^7I}Me2:K}Дߋ)Dm ewByJoD.Ax^ߚaڄhr  9MJ2(RRcXEWT钹$V%(ysNMkWVMyvfVq+LjSa/ MXw:1zsi}E_:[q8F^]1!~CDP4QdIBVo4 ydXg|`5}/w%3- G n7Ji%#xP52=i-1Fg6mȋW"9 e:"16$AJqULC?t/}\‚ z0-JEz>՚7.&>fbW^b,>?a=pyªꜪzf# H\τO}P []# ^^6KuUL}HW-xLy6Y|ofQڮiV, M  !-VTtN!"KeO@G*~Qsqc xF8%KT>X#šVڞ*r)^GQ3Iie.j_:ZC5C  "s( .(T! wFO}( _5/v %~I83ϥfp%l ;(^ЦRWuXUd޿U/Bzr(j_s P*O:: FߤՅׄ@:F#7a' z@-'LRLl5w{ .(ͮⰽ?0l-NЗfz/ct)ۚ$~HMA(ms~Z?7:u2LI}GĦN%ZGFt#1Bn-)FUj'An5ŒV='MpcH0d"r YT>іۇ511sfC^rH$z,}4)-^5 ~FpTtq?}8n9(.V$T{ ,onr"0YBdŔ:[ ihN^q4@Sij s]3 k^ӎ>;uN?[y@5pXlgH?b\4HFTݕ#^믝‡8{:xk>Fc?9CaXPBx*85^MU7C+w_c;5 [ QS}_ҩ!+|+jzH5 x9s!QLFauЦ@唗5 ,<&R6O&u) $IcB[0űgthX#ozz ͏G1 <,lֽ.C6d[BvVyk( B?YH8X1` BIGr P T<S{mTȷlX4_ubܐ>+ DIgnj{$&u2<0._m/"K ?cWJ~-y@#5#hWGrѣl82{72~;F1g#̯%7* iPa*kŰ뀻ojȚDWxH,C"/a!?$D-YΔ(I+9V^C!u/N%Z)9㏹jcELl&ľk7D(Z~QRf2wv;RMMIbӰ`Kם:Ui3쁝daŨq"\~*ԛ_PvW-t:e1z`7O%~&1hȜ'AO"]% Ev8o^FđfECa *6I[yFIKljF#!lS{#ѐۮ/OGRcEW'%2M| ٥UbDr_Dٛӥ |[znqx隐+B&Z&1%4-Fؗs$e&Xz.NilL{1=AϩNⳆZ LՆa~Tz6~#!Ǒyx}nJ%zI=1{uYHwBn\a)`0nlTN_Հ!ߢ&0+)p&>5#ewQ=S~#GG,8\Ί1e dnݻ$9ZI؞՟?ŵj6Iꚶe82\61\|fQhqX!F~({X$P󩣪qviK6?S\ OIz;fzQ]@ wLeUoCH3ņw^ bfbLK_w: Zo(cz,?E5.U}:g&M ϧ5—vOa9͉cHN҂"ܽ5Y6ɼ-bɞQF~ 'U l"OړJR8n {7߀XǽnSAd"MuIQfw w%`ߍVDNQ}TUU3*ܠU;x߲ߖ0 >Ұgb0I @RVdwسwWd!z*#u˟OlX~/Kjċ}؍ؾAz3;-X\6ʑ&']fi vGA",ļ "W|ӘJcG}9p@_4=4F(PH(ʧ]4ZŀU]t( tCՀHrzCVT[CSаb.b !11ԺȇZ*(N0݊?eRI?'r/Zc')?oly[rNIP;VnB\* :?T hW`hU0dX2J+oqu%fʡK:eØ(qex`E7(+ݒx4<#]PtvpaRKGdք-KJhWuYC"xԶ&V$7Nq"zuL4]`=NjJ~Tlӯ =VlS}IFU ey`hZ֮^zaD Z1/ 3>5\5<`aF֢ jو5lR-xϩ1dl8$PD&ފqZ_g6`_dyg4QcMrØCcBs$YJ:yUp`?;a1ٜ u쏘fIRMc.Kn묋̙;wjt/,q *D26J8:T+3#قJ%(bQ1Fg8/ h&Hu2i2feE-<)47N6InЈ߄YH7THo^*jsEb4l*Կpmpr`Y*fʃn&9 E1u1NJk1 q`9UdIAG{>'L ET.''ύ6^klCM30xi9JeO/*ii}WYB? 
>#+뎏Ȅen"n\Ds <E ?s !5kΗ$37p  ;ٱit6Ă\*yM&UA#Lh7ȅ%bKnˋK,%C(r!{ jk1ߺgOĜDf>kg_ׯ^T\OI VDD ݂>])RҍSs1edN8:aCX`* ױfWx=#J=BKp ݝR ώuZhk Ceخ;a=U_y2 TO*AӨۑD,j+H,щt4- 0H*X BnhguG>>L>1Ց@+o[},Kꡛ(vb00U}xf{%w̛Wo͙۬_/ƼK<$Jj p<|́ I.ܿE#/$LZm xI+~vݴ;:6Oކ00"+A#?"CM$#>W]v+^%qd&= Xٳc^/T~6j\%Y%B^&M<' dŒkp;ܐOH#=V9{QH<RraFVuR|d<3lAT6~zEvpaoEZu@aB{ND+  v?)mW/#1k[Cg wk]ge[c0hځtOU48 cLN) %EQ%iFZF/GFSNhG&>O:5dyQOh(`!_z[[Z^1qӶMi\J 1oXB؊1<3DIm_%YiT\X%οs_k,'!n3GQji!vmr=*;-Q"$ԀhFg 4VN^ok5$ĺsWk-2Y &X :h}K!8bErrbV=(H,pb]W&soXRfBPɀ`7Z1w!x*ih(v~#LEk~ G G`dBj'VWYdXZ @K|mX,Z |1|2Ԯzvh:ױsBTlVs<5c]I,rL// ,fC5Vo^]뉅MXF4ȇe*C*ƪrBiՔ⿩YiS;TuV'gJ '"QZcyeݝf"T1Ċ"/c 1cǭ#|>pYE3@r?[0N{fs &J?< "UXt%ok$~9eOB!I]lv>qȣfQ :ﵸR uQ~w˷/G Taטk ($W|I$$ֈ⢺U~ ] PVA#,%z2|PȫfkgyўJh F 3p8ܕ&ciVs j^w0HF[9㫩3)v'cW{X(L]&| 'CXn29%钸P]qExFP$L›zmnh'>vCX6f{V1enF%A ) y0K*F}U  R̷ݓG0CZ0@.8%w{Ƚ rD郂rh̻w齜vYP}F.Z$!vvS %VH2` * a{k+U3̡S~XerDgȶ%Q& ^ o."Q0͝g R XN0zÐ>v |>t>eK&r]gFdep00%p|y=CZ6ِehPzNM8zߝ4D5}ڭWF]~`hNp^R< қA-iji"T+`: ω[}no~;zb)I8l-`h&1Y᪋&0oC?%L6'tJi$wgqՒ;JJNzLB+~D__ "5ڿ6cH nM,;Q:9݄'sR!K"Aؔrnˇ zZ PliǬ4_8>#$Yx:iVoAMjZuW=gg,v%WߊroͤB<8x>4Cσt*2~7᯹pԧLgNAA͖\:~:kI>\41]=$ S2'R<Ǯ爜8,.bRTfju>LW$JgO-[A\/}0*I 0% J 7N/QMS|};OϫCGց݃˾eЊg#BHw8gؕ Pzڴk]?eoy~t3rz"@BIa+ Bq`qYuAɖ3 ϒȂH]^;g4/ nt$g*kG#M=x[ט;IdzU3;ƾTFڒ#TR>o)Iw?>!~5u mvAzGg{% ׏Rıpd"h)V=Ƴ3oI;MU9<փF +3}تH:@ɂq429SB]ER7,;>-AkoN~ f;&6g׮#`#q >'lÆȺWY\ iYIvEcR\:ִ<1k>c ?P\5t銋8<^m$ reB6|{v.ttn -OJgPN%xNj-Uc)̐FdQݴܲ T D7|$׈Η5]"S~.0X_obW'4ۂ8hQo5r s@%# vhլ@\'p]HO0s o4"?LJ02ӐpSs-hIv+(xpmr* QWQ~h\\qLW`, tc\I{K#Y F ]v$kn~eAk| ʪ^CW/cʢsɭKC0߅b}G,$Qx(ݭg%Tߏsp@CR:!(fW1Tn ]O\!ԐLR gQk bcc:IC97N̚=NlTv yͿ򀮢&H k:YT@x$7QYKXѽ!q? P``# KX3yuE[Hb&6b5Fa\ܫH,PHԯ;#y5Ek[$\8?؏ݽ$ȶbR'Ij46nzSHڮEbІ҄ȏD\vvdb .>-|u:[tSE:NTc a 5ˡ_rDWۓJ KP4јdyde35*+ P}?@ϏJ(>ePՈ% }eU<xe^J&z#F]Xo&pbo4uCSL]YGA-YC =,8^rf/SҔOV\#Rj_朠'ያb␳$*Kn|.~VS*ƤJnP"gbr8Á7̕r$x f.?kD $rw0NQF`5 ,|^ȵىk3Q5]5qVG&FI$p =>ϓFg{ mҧ\oQ\葸?HY@;gCx,\{`;e6Pz/TU c}ٞkaWZ59쓤U3 0 ezOZ` hϜ[X{d)5oM ]0ңՖ;Y}yƺf|S Y$[`Xn#~6{VhtWthd'4-(4ҳxw,%ձ8j Hg!Ξ0jjFxKe݂h^_̹MN;IiHz9<唹R5vYED hD@jiTix>V2btB3ˡXp7-%zE-ֶ9p%bWl&4/ިmLpze͑ ;&{=StV0y-hǔ֗03G+Ya9BOXC;hYV!.|Ne&ku_M*NAz{,rHQMgQI`oxEhPrZ1| I26ߎ>`Y.t'P(*Wvawdh{6aHtGGq"=Oɧ4-Θǹ\L$dS6]ڵ]oU(8X즧! `;+}>ɷ AaLtʢ;b`/ ATU0JAam>#giM,-g|}iowxt XV)526˫.l4{?q`஍]v>3Ɩ,c0)~{"Ml>d:h,gV59P02bz+ kB<șԆRg֑cpՉYdb*XE7 smD\h8:#c`>"W&vM%ÆFQ1rMͲePuFTt҅],&'wa E{R7 қ? ^vy) ۃ(5<^{r+l{pGj=້tp8_?J5<~Թ:ׄl:WB$dT=̐pK%aUNhR s#6}jÀVޮ6eOl7;`}\R0xtRBg~PՖcVQ1pk= ii{0$e ^$$'ELj#DAk% c>AX}oaZ;lbHAjߊsh8{a:o&C#PoDTk_B,ڕBxΎ0`'?*ydD bQo'  [[#)3LX%pYn9-C/,XRTY vxYW"9!Dᣟ$X |v"Qmڗkwl\2hW$ɓ2 D?#J^ `R; n*e9uui{0ԇum;4- $F(zsHFEba>窱s;i||TWXDޓ8!\EtYF <-ܤe齍s@3m&kİnkeU,-zPnDTzpc̸^hM^8QH?yt =b!zId9,mIbiz%]{WRa[qY*`ZU [:Sqb)pǐ=2yswbr,%#]jib icuxIH0/|fԐk'Mc/ͦʪY-P.T62-R;FYkwMD ߏFx h@,R2]JB3bMU, * a=L臯q5_(HkڥB"ec%.V@X`9p$ (4*+p>D)V&wi#f7xĀ1DE3Es&~Cb͚ӶDl4Xn,!G)lޯlҌT h:QU8BFpr_I{ɝ8e:e#k4_[$0)z6bYZ<uߎ !u/h]8:p6F/+$\֠I6s;i^H_Z Z!+uX#|'Af]js`dO8  C n;*CP#08T R^60_:B$;Zփ]*+[(h-jAmwl_c?MYn\MY% ` RXNc))s\>rˆyVgM.d7ew0nֳ.$iB15 %ネwUa!{2cD`R!-E82<aP'x)l7d FL˩lgƻHf|B9=cN- dNtqF/QtD r m]w*,8'Jŭ-ˀv53%Qv,  ֲe 7Sz!L\I*pӗch~jak"ӴPw|''h2J`W0˃Da>0]6iCkܾT$[7y̫!Jz1YpPT4 !PPC1όq Qm>g$7ce񰃼dScgiV]w,#(oMQ 4`Zdwr[6lxԍypw Po g-i;a ٥%%'WV* U.ZYmP"x_[f#Z;8""0LO'˭QuBb@VqP^ 8o_?{MtZ$/]HEk${udw>eUEAiɜL%Bw h)dk=i ve>&T ? Ig'i 2aGq6{i  Xx?w>f/'22o}ekG9j4(FSp]G݇mdC!^0*O|ͲAv&3k=œ&zGNG,ij-ٴh+$XGI@^0Wz2)Ha6m+(fE?3:/Ă )0ANut{B2"s. 
wr4!NsפZ64!%Ev#N-GڔĻJ9ɘ/6"d֎z'.#F֪o=hc(IKҋ7L{>8x>ETa bLY+ >]T .ZkC=X2fL\af~C\ŭ+F*Ӄu~ cp,TmdMW/'אGƾBNKǟݎr400؀S;O~O`@Qz xmۧ?_nRdyG:,T]2Ыq+设6J+LoSI@EWF9Or@9{#g@?1iF((;HLzv:=:`a~GTEq3UϽytAIt/ݳκB4 c.xW5;Øo-NNjQ@mP[`1]mE @z vlpHJ-PZl *j_*Ǚ?WҲ8AF61{VAe|ݾCv~T=¼fhPԘ=&#a#or0>iV\GTU$Ɛf>8`7`X6 f2LJXIBӮls43 IȬZuBPl5 燑Ԥc F5 Ze!5c\,D}a&@&L` iLV Q`8AV:)nq/w.Ō2~l)FUМh=@U`))BΫ#`/2-C>"Ci`ej=~!0Qwf |ы]Ak'+L])mT/rp%K{TH FkI*pKpڒP_$JM̅dƩHKm+0ó:6>$ Ok|)dSw78k&Vp'moSGO{tﶯ_VH1l'9*/w*2"Q2<#?jpɀDGȘW0D_!M%RRԼ7-=A(jCE,+׳91 FDA!)>i&nN;E[*te2Qm>SyD0A7Ohyveͦx}#y Y5¼z !:`'FRW ߔ9 `dwb: v7F#lѢ\0^Ֆg A,)xϬuЦNIc:tP;$G,n*PJ%کj]6\ XWEOIJ(=weAn#v?&_؁/<\XpNX[6ksU,X[k%8;dwqtֱ7g$&v`::ݎ[4Y Sk㎼̥sqLX VlX['鴑1vQ! f|\ Vm ,W-E@/)9f%N?v/At1)Otp?hlv2)m͞@"B"YO~ag_ջcW*RY)>cPy^5@FJ( '6ӌL5r[v,\-HT0BgC:!H+8za+3-ÎdJHBj zKS5+oȻM]/@c*L kUSlgyyPGݣ wFӅx,Rd'>>i>Y찆˳ `&rFpKK"2}Ewċ;)'*n}(uD)pe[t4[mF(=XѽwwR 8 TdXJȜ)1R~ڧTQbýnv@~qR=pX1n#iB'11ְM'*el |B_IDjn6C1a'v~f46 >/-]B0阘9c|,RD/1B8!0~>*=y,an|hF'tIk_<~`؃x`7pkt3IM@϶i64?J=Ա\/01uֈ;3AY@ ħYX .?4ѰjGޯ g%9Ǝgoˈ)QߝP۹~mO._K/4H]>gXc[NCwgt_UaC~rǹӷ!1&h"3mQVEJ J1g+|Y+h LuRb/b0WsLX`D9!U^"N) > x1 Sbu7>e8h>8pU=Ѻ j+ :{]/X ϕɿ{h.\UY/t!U@)N+$$'9ShRs8s$ekH&''kgR7Մ b݅aƪu}b{P>ũb"R9]rC`r=LļqmGlegCuR٥,]^wM!5F0k%حp$\. L@2'Enuc"#-5,B.z(|+ 4 GjD?#grl`.\|?S@~+e endstream endobj 454 0 obj << /Length1 1390 /Length2 6200 /Length3 0 /Length 7148 /Filter /FlateDecode >> stream xڍtTk/!Ȁt"ݝ" 3- "J#)- zs޻ֽkϳ{g~35!<|9 ]Q?3>1]\!p˹AN@iaU7(( E||bqAk@1\ vdlVp#b4@;# Ѓ[A`{l@8zxx]y.Oع@ vq[~ 9ƃ зQm 0B0Wd@詨?] ;_ѿA`AVVpG' @`:9pd<,et |MjqBB&y 0k9#p՟<lu/ku=`>6ͯݜx `g7_H?:[0 '"`O+;_|N`?  r.n`?m!V%';R ##7!}2Cbz{J*ZƜ$+ p @>>, _]44";߹4HĂlܔO w ݿ?(AlzeG ľ>V-K*B<"k]!7lHVY9  W$H n]B b$"ih `/ @l.x)(E>Kdo0/[We+7$~#$=Vxp+U/+dh<7%'7auiq$Nf/xr*ۉϷ$+kݱ&/=#TwrKo8972g;j璝{t)yVwNol _sDL2XfNQ1b!p8H=&ON'Hߎѫ&p1^揽^(wmf6C?!c~zƧ`eЊ+ t{CSe9 85/:JTRrcC"`W%O" \Z;-HEבXWaO))5e~RvwHg(Y{K̰~]I~/OT^uJ(y&hj_SO;wjHӔ7B݆LiEQLfR°g].!\5W2W+;RĚ#'R[O3Z5c$DY-pb;ۓ,{ !{Le b+캮ŕ`Evûru#L: sBf%֙\-G#U1D!Xhn3V-6"듁?g$oz:pY<)(IE^R؂jh{kiģ@}ؖjtgC"! [MS9o5N?c|3J_s}b5q\Ծ՛Zeb~G.4q]k资|Qb6U#ٱobsUG`$7}b\ܖ bOE2gv$߼VhM=0wlG\FN, &mQ2`Mj BfMe\=^ 8AóßnM PݽuQ!`|JR6:S+r18iN pc>JS$0oD[GI&O iy=3_Vy5Rm!}i6Ő3F[ڭnZCs3^a"猣h/7QbE*nve24WL7 |Sϧ1-%-c\b^jt-1{{]=?`we1ۋld(|v7JMe7?P;u=m"gEڇ}j G>`v.>GU6ڊc{t̮/*ޯq:ڧRSמj7+;ȇēwuaG{2i=^LL(\ ?9tԠ ./ /|'ipr/5dZ[½;Socw\`ޫhf#r14fi͑U3p2ml ;'2JY kЩg?8R~8 8%^1 cJ tr7#mYTs I'{(X>G9v98b аz6R|iqYѥ{1aPzF.\Ի4iAļ8biwF~v`{ W$ƴ]Ee9FPd:L$4'|js1NJ;f% x[3\PV(;XKE`ս" oG(ZirSPߤ)&HX0~r#;X  >Sa'BzSga0JWqrőϹ)K!Y{ěz˫@'УKթX{ l㬜Wn}^U8+{oc6"R~l?0c׉!a 8o R%MFrWUIޠ)< #-N㔆U5p1ltxh">:gpŕT±HnZnDRł5m1Giq=eh&ǽ`j  9(fqie֠3PC&,f}WSs=[w8myth s}%=U55U8huPf܃gӡ97ƴc!֝bk9#- ,,K)C$WdJ3J+x"jNՏ8b9%}4|F| o/@WmDyqŀۧX̹1:iۥH xe{q_rՀ}b%,4#kMЎP"K=KUyi%!*Bsm5 CxBFasawLUXч&6{/M\PTy?i2:=8Tcļxzv0֚ k# [(,?-49lP-H:`da@ ,%=aatq'B=8Z=T34POG!P~s5E"x\/-Z7pRGEhHeJmUFz% v8們ݛM-k=A VS'r)@%xrKĕK=US3D D3Sⴚ`N~0Nܽ tQkFWN+T> gFҾ+$nǧG7cYk`wtV#&땢`B \>σ\4nXFS n)E'r{ksJDhPyc=Z [* KSQbᖁV1I|w|TSPښg1UPXԤ/MLbv%@?aE_kBPqZl-~vᥞ.TGO#q&m(њqrpA6s9K`$n~˙o?FC_(o1) *V"4~XX룩A DǸ][;N/{\y+hoiģYϴR75Ž !qdq4 mgͷ͸:wZ.w*Z}d _|2"߉E|x_&'a2j?G1mǨyO^H,J־|V* a!JFX*{ 쯦2eafblTlK =Y72U66b_wjew ;E_=s+>~d|pI]H0;jM'Qlʑk}l(}1sKOv[Պ̠DA넟Ϩ7ϔR>=E| `196f uW *gUxu~p!+vOS$LoPQ=]g$%1[Dš l11A~[ :6{US.|$>QQDiTlc:Ef>Ces"] ָ $^vE=y _5zҎ%^rWRkMwo Q#/R]Ƒ oz޸g <qͥN SvhO+ٮ| X:[^=>RNe9S=.PΛ!y<̹摑kH(pX}]_EYf0}˲ma#U.%YxKCVmKNdtW::@Tjō_΀] d.T]H|qeamLf۞_G4[kڄdӗ >jbN:[08q{2LfAN0X(5E1ѡȊaqK X}Q٨xh8 Ňr%dVM9ѳjnޗzEU|D̉fU{RwӐ? 
ΜT>$~dMMiXXo20/8LJsIh."MPwHB6U|B-J&8[9}'x!:ԫ?;KL^ %Q5W{'@31k^ 0KkXT?ڭE"6Z}L r+3WKBA|w.ȣ@XoĴEmjĞLUSJ;@,6FvJ= D^9 (&t]x W*кPKۯW9"LP endstream endobj 456 0 obj << /Length1 1905 /Length2 13104 /Length3 0 /Length 14296 /Filter /FlateDecode >> stream xڍPX-,@pww nAh=Cpw'8www 5`>k>g]MI(jfg;12Ty,,L,,l '_RDJM d^4vzI;)؁6Vv++7  | c@ k:"RٻC@NY1r3B@`%5 @tr4NN|̮LƶLv !Z+ tB\f?(l n rKfgj ^6 S l^sdJ@_0s5V&ֿ@ Ʀv`w`䙜ܜ`? m^]A6&n UM! {'G&G2%fv@#I @[wgYv`Ͽ -93kA@XY,,,ܼlfjGpuw{J?į{{_[ź?.@o#DVV `*__r豼+6JR 6JL `dds_E*Xx*`>߱^ Cp}N/g?_rSK5ٸGWgW+ؽNjWke_g@la%@n@3e_TKǀـ@e;G tSej4_ :4Rljgtqr!cw'~EO14`3ڞ7{rq ^߈,0x^߈5?lzi7x{]Xqff ;__ |l//^m_lfԯ+l4wG_[zy7Çī>x=ŪN".M L#1y=9xiY4CuR88(arv/[ ^U٫Cz,a҈+5ɜ#H u,F l,=I{&[z:c|]+_:u{#Sp)f?mB-kGV'Vzcϸ}{C/n i[c;Y~/v 2SPt'p",s(T3v ]WZ5mfc3 IDo%\G/㙿&NǕs8'ܸO sw9*gF>kN9$t08Xx6vn-OfMFΨ^m'wOߧq~Yd !>/nrg1˾uwiȁ{܇w/SDGcnF̆'S}`{A)/O]2!ad:y8ֶxyE|3m+:lój3yRR#+J~@tƒ=E}և_5Ej98ʣRCni:|2kP4wcv-LQg!cJu}ɖwl1K_ǧ}`ҐEڷuF"F(gB`7wIfC*SD(3λ>p816ŃuTu{/ř%ON(Z.iCZݦ3J?cˀa=uqUA@ +A!"[dtr5rѱt2TFCYO5|'L{CE\$_qU_ f`K 0։tREG~bz,opWΒ!ܿʠ.Y{ҏeE,-6@+g1`e$8&ϷqОS2'8%&ɎzNlW!z@IɵD.C,a~}u=,~݁\{GCC/ŸɣxDkz#;=cXWkͭ9hҠ8cwd~%/g>6a1SǁGZ^Eͷ_IS~Zw<)鶉O#z'<ը@ʒt{JFv|5uD,5gFg,>{3]M]o 8@_˵-Lr(zN-k|vU=:gAq8 '<ȱI}7[*&4zKM~]Ic4dvk݅ȨiȞoh'؍Pf}.l(:Ֆo2RPɿd|2HT_?~i5; AmTmOz:C YX@!۸AOETf)72s ںk_3}D"" /+g}붐Z3f˓@38 SQIIQy[IfzQLt v7NÏTayBˬc a^"6 ~Y ;▚Vh0,~AaփJeQ'\뀑GOn~h1nՈF2Nag5}@;/."Xf_vu l)HR}f3T(PY:Z[ne$.@xѷ,뚜JcK'K۔Ʉ[n9ڻKF`~\gJE$ EhubB41qmm6V( 76YZ\'aŖRQּbwv4*xұXR +(dx|#'Ϛ{j,hZ7,NZrx+PlXF)}u~Oh*ڝ .E w;|IaGݥ}L&}Tۺd.kDvsaaN2aи_LɰsAΎ=_bFTh_QPޏ (|MCw3>DIKW^ouN 465a]C֫N6S4S_z=)1o@AԴU>x-z'iIZ4! Av˝N-:/؞D"Y#;͒}< 5(`/b|l͐JK+ato[!3cmrNҵb_[HG{vw,map$ܳO465̗$QPܞ*l [{;WR% Tm'ACoWl$n=ŝ l-:"޵mh{8[E8'R=/PMgl4 d*E8Bdnz9~t=S[|y9>`[)Wjj9hGL.GDR:~Apǻ(ց&D.¡,Fj;%;IStp>f%8BG ۿ2)Y1ۉ"6(-SC^O[4+K vc_lǍx(]UU3 2KbƧZty̎{Foc.bޗQʨ4]H@4j a \7hVzybI n@RTΆD@73|(z;^2.{I__Te8fw'B<$hxZ_%IoOiL>*`ȼbsV"ű#ܨh/ Yz*]<^P#:tg /E-5R*-T:+/bGsZS1/CxU@+xd6g&L wfOO:k(&x^8$͔TXCB6Wb>a>e `ݭ+p!1zKH8N ZMhVBΗ41k%S! 
gRSJZLhFIުzZ"bbX!P8hR s0ήcWϣPvIhG0S3{m֡'AFjPqEABKXr0DA_NCbMȈJQ>ȋN<9/ಜ2EoM"*-9 M?mX8Ï}osD:%(xVfY@Q#[w]XFp"^DzA"tquR|gȘ_΢E (>RqGf>RͰfMڥ" 0]?W4G;Lea%Ob"l&Pp&a 7e~v I,NYzZPa[XoU}(b1).8s&܉Y]EE B=xؑ^rB3L4r[g9ZEJeFO͟6%"3~zDL K%+,f*UTp!}@Z $n:d ٩+oh0%1 TLT L8 ixff\7ojIt˼E&?C"T}]&Zdy 헱{XclbQ!|s'*h;ob2xڊk,578=ԫ (@HҁQ N.: -FQ +ʅYE*cm/F1E{nYdy ;fcPQ^{%k bs%,8ٻ#uư48R 9EB^S q^68]loP9lv 2 ?+Ǭ$Ihx', Uˤnj\2.x?NJhQ5-a<3TP=je'AvΨ?v+UGiR0w._hÇC@ԕ56/22T,ӞGp0,SñGyv3Rա&d Ǘ8qI4-8WI(<4۹/A٣ KefP0rw{cwd} ;v!iR 0ό}ma>eoARKôqaYvDJg*j׳'uzm*tg&{]+s/ăa4uwoÙN G5g^Tx_Wy+ 320 =]aXSs,dsh~FKa[ WЛ QP~zœT0 RfΔJA`f&7o- 1gt5qPΘw pxq  "}$kyҿ' M|a+鳵 >}R;F.l_ڵؿv]RR&I9Kc䂻ĸЫ+Vm!0-VտLcnQujpL`R-[t{ w)JB7AWLi4n k؛-8؅(zPdV驧R탸OcNm 49uXf;/i pCRN+wN\jҺsђ# NƪԽH>I݋Ӧ#=ј{xk=deF黕MULyC8]Jj?2B^#Y7KM\H2.]=,Fԥ>ia_ON0= qݳ' Lz'^uwMɲjGVmm`=b #I\^v]XΗů*/]Ԓ}\UWh>=d/u%-x[NR͐d[J2a?KRG 0~bwH"l򇘻X )%RYG"syY!K߆ԭ%ApQ8ghMz g#O8Xߣ|:xtyY&JP.xAf2d{-rpӠn+Df4K.$@)sfD |U9nXT4m'ީ dm XQM{[՝ȏ\"]tUqS,u峺XF*%}~N w%&mJ b KwҞևv_UO!Ɯ^ђjuv'flr,%n!ɍb_+SSGGHT9+9{6B7PJxyHqja|y<ne3ʱws𷈆j~3Y n s13c:y VF\]`ՓL9k_dOQnDO0We@iڱ/KAx?vvEwI^˒p^ u@ߊ)Nb 4L򱻆a{t>X:[A X~QzmFmPU=0_&.,|, 2-)i mz 'ͱuRQVgZna'm%viš5j[C4O$*2pFņ[xCWgJQ Vom]*Gr@>_Uҋ$n!}ÈTo2j渴lxʼ?!IQ4KGK7^8ؑU Tu01'ONߍWYUFI.(=-Pݠ'Ϙ|R̷YϠ{A/ޘPҦDslx2֌Sroh"E[p9=v(6俾aMۮCpG 4ݺ'):7x|A&R.V;IT.C@]iw[I`d{vh"9^v9R=eBWM!boLP Dm7~ ş݆b!)lQITIeze;(٨8A_ Y*-A&E]|:l$ΩPL\ 1 -{5}`j:UKn6&%iSo{e.ؼ5ypGy X_H!i3Ւ7BIh*HbӍ俢tnWxp-L,'F1*) _`֕}u'b_ yrX΋U>CƵX1VPEc0 ,}:v;4CBϻ٫iez>,^z*I~٢\#rܡL 244p5=S e͵ eLp 9rǷNqNz'!PMJ|ih@y'JNZUzxƨ| ²5~k!1BSDMLIi鎄` %4_Ε7j0#5iM= ekOGZl 綜ƪRG tөŮE|Zwy]ʼRcSF#ztr)y#~W!m8xA/4َ%3b5Goh[Cv=~`=s.E>l\lj{%_YinܙشӃ~zg?# ezxipf<%Fzσ-ܔ5]FC',EC v~ian~K-vHm4G&@G'3/ӚF4m6KBn4Cz@"/~8Ǖ/x܁{,.t k4?@Z;gbp5GjgLOe {x˗։ OV׾!VDB NH&$Wr^ln\V=R/y{}5~C[5RGrx@2|E"%TNwtA.jR␞6|vBlm6tict{K*t <l:5>k 'Z=t6B7[O$N=}(m'/M2꘮ 嘡O5(W%k i&Ԭfʼ$fsn?e%5'\5hRhEMS~)˞ʹ &r8} HX,||sv02wNT };3SQQkC6^=+rRL.8[7UfśTCFhvBfuQQnTQo!YoqKZukmg_:X;!讀elZt[M6g}8%?)"|*uQ< qR[ʶ3 GBXa_M?gB%.!\sA#b!aᕫS*yy:Sp?_ c^ָ]ICBV@M <:2ol}JV*}+7mwߖ=v~ެ=A0#Ls{FwnmkM OH7^*W\}$\K׃J0 ѷfwYJ1rDM<|}e.w k e )'dN";$콥Uv?ko0; V#f|k,k$cyTw'#D] Me:ŰX"!ɕ8T3Uq\CSl:Y 4CAwQB<\[DG}̨8%gXD:/jV7SXF*bLi/tGt Mk9&'l|N&&*ks%>a8k.' ~4cuw5 5RSi|wL6Vjf6JSIV2 r3$1K'E;#)+ c\KE(ηI ba3-%Y4W"ôf_MXk:92HXuv͠A6lbyW{; /ٰI?:,ό*XFDI_ # \=9X 7BKJ /8hW¥n)֨Xdz̙FE?RTT'8K6ʢMCcͽF=]\v/Y)ʼnq W꽷;f(5;.=*!JOe9hcZұf>^|sNƝ_ѽ(:z^J / xܽ Q/n Qdh|$}0yɌjrӌgKat-˄FL'U?*+Bk|\ a4slD ?C,*Bv i[(}"kY:^>#Q"{h+7KSqvϑn*74u,G؜ _+Cv¶+#drR,icR3eJy\a"My̛l#.I<0,YjI"CU ukx?ӏ [IܥR\y-.Ֆ]܇2c&e!KP Y2w b~!ЎYʠ\zk 褫ź/D-3$8~Vm 7 $Ѝ;u.؅lNϖ4H?["ޟ/)&ک-DjRr[~uR$5lIO(Ci#{<; _ƐaR'N#ɩ[}#.)rpEn%}cLeʡF1bYKe|rPnҠӴ0Kȩó"A؍ys.)=.[ r#b _D^HQպa}TC61M},{5He*4Y2|mrcY<}Bln>lk0=*en/NAf?b\V_F;Q8'YVZ8Svb++xnցn+E]n_]@W 1 E`iא1/8 "%ߖ/1hp衡(ϓ1Gv#)cn1sI*Y(10'y^^FyU WWpS&gԆgb> י[tjP_C*ف"+WM\/,k70iF蝬o$E;4x/b`}!}:\J!yH?=_%}w{BaB\aIRZ6ؘGD꼰c py Oz+רsr}Lhg;C̡֗kC~C~Q?6\]WCeFa.uL>frv="Y8 "ſ G endstream endobj 458 0 obj << /Length1 1744 /Length2 11160 /Length3 0 /Length 12261 /Filter /FlateDecode >> stream xڍP\ր5HKpwkh\;; -}7|9*LIA l9102UXl̬Hj 'H@#l/ 1U&nj(8X,,\Vff!qc@ :"Q! K #G ݟƦ`[{c;wd(J1:904qlM^ , ) 0~M! 
{'GFG=2%@;'G?AZہ]}03?^1'[RTdd0r0XXX\71:+mgU9dgo, 3)#ߊ$mlSe7ٸm:N[ ~k kuf gv2~;׉fadK r͔@N _r?dT;xXuL__" BVlǮrp!cw׫~%'Reg9ǽrrDEq!$C~ %.?3^c_uD,&࿐ d/|a/|-_Z?FLv׼k"ȿ50y\GSg羽^(4EZ^ZUwe؝deG Fܑ pS㿿b+XqyZ WZ/r};ziL#G9g֟]]:Flvfg*îs4x2>S6WVXS53@OFEss@vdvTֈwJt6u h0c/h/{CN!Bt) o.Mm1jnft5M;+A.Y h۞Ҝ]8òֱbi}MC~jzDyLMj|eEL&C8fĆB_Fasu>~r>d3褅߈d>ǚ 8EW+} ǔ C}8۟?@,47R;F~l(|S7 odۘUYl9G À=j$%9P"ܑ#8LmV`lЫYr,8&\yù$O搠S#6: Jr@!5gJW|,O>Vے3.Cj^2us n\ :?`x޷P|VpuKyt4'xD|D>)F ;iMU[WhXzIxhcxqlV;Hw~vJjƧ6gښ=qFqM; lUF@ݳBJnOFd]dHe7iu9(saCİRW[zliƻ,ԸF,j0/ܵp+JI }⩫[ ŀ'?b̬9P۞( Ei̿@H&Q6^FVdįܧ7 o"S޸M 3Kvtl5%Z*jqaU'\4Q #BR3}=7EeJCd2L&pAhEhu=?)z–}4Ԍ4ɕU4?71Qed="uB4_~]*ϡIueY1Njޜ Q)#-i`eR҇>l T)qs|竾I AsoՃOm ḋa ON}bV35ILYLc&>CE6YS)q8i/tƶU]ngӭpvKX%hKZȡO /G{T;rGkzx p?_UzW\.\z +b3F3 Yg|7k &' n.$8nvci_Q :*.c`< wL}nfeLrI 9"RScSl`>J|X"5[aiL\l@,g˅r?NNf#.mf":b+vE񵄼**(OHMV؉3u8NbtJ;dʵ PT(67 '/ [r|yfґj*o'|_ h_A-+gw5UZML2~,T?$q-`7:-!ֳ!ExW݄*L`:cgV3d8 4xz2hMtQ&-i-7r~|?3sϨ>c:L !wŠzc#o35[W n F;)ŘA ֥-ak`Z !Vcs1d  N<[:?(O!+7nqW逦Ϣ3ǂwִ[~أ0]Y/"F*j#}.q3:dcrhk?%w_"ض2QL㛧c]ZU7Dz мb?G!͎ %i .@A.bup$#?n4QXBsTK Ɓ7k@|d>ۈ26.UzX+OmǦ\mh^)FEOܸ3q0^Xpevv\0g)^w(֍5d\&]c`{S銉 h9+Fߞ0ErGv(55J\ a¡89\ 5&yKi|1i5[s1+O { h@up?xQ5xHx:t(/?JCfP4A78R>|j3=#o'L3`77uW]&P~xUeZv%➇_xF[[#t`F:# ɜ7J{j9.hfc M]E\/ 4ل4 i`D'dQǽHZ .T6G,BwB6Ѕ&*mYm 0;DgVN^?ye' i b-}0ZuswuiŽMNOS;pA -q_h֚oZ5gߥdv(ϙP圤*dкC ]!;nhB(;,DQ"0qc4/6U"8 ;=2aQ]Śq Ms mJ;bn6-xHz#Lnza5 -"&f44J΂hO9R.!hsz4>e<RMxF~V OžH4}|Rjsvp<ϢL3- Q'||n^;oN~pj6fV3kZmGtcchҖ„xmDh![Qc)X#dl:H]ye̓p3smg%PDĐDS65t75BvULƤ`Ӯ!{p-z"k[])'KQ۷AOgGd!58\wn]_;m+}T*F†yVY jF[;Fx㚗d-YI A,Rb:7r%DW._̢N}?S|7Gc&hq%+`Hg)yebUs)&[<;\ IidqNL>*mFF.wOxΘ8;oڄ)mlܳ+2Ze)M,2&EX>: S bJqvi['4{7e./c{9840Y27O!_w)>Cm*]EM !]9ez ?(_>>K!Pbj#Y\D!LyW1:~xaP8 ;3P6YvfZ'!jcw얣ӫ#-!RLzJjRflmN8 >0QGO4nRsH*_dA_Ѡ<-8hNpUF8YgmN L-]=nMPD%\܈[%cԞ?BW_D yNC'ʳl1 9ZK6R#$Ch8&'aX,@K`5u"* wjNV$Z8X8AYnflُ+DI?΢ W,IT&LRU/q|.&zpNK@.Ù'7рoI(jME|!9tԣ@;M ǽ78@EhF=~ӰP}w?Sy34˴@Kl/y#J@a7$IdRpm =ش'#C^=^[4ˁ ,8{lZ<໽24st3) $!&C'/AXl 339QG4}.Kt3JRXَb`ea6ܛ7*Ca<}X % ba >U\Q7P{)4P.G5lD#V?9m1;u+ZXx3+2dg q,|v ^B,GkpJج N=j=|$hQ1s5kBcĸ2mҞZjX$rG ek1 ʺdK될K77wRdÂYf tקʣ˲|2=#Ou*n/zbg;X$ly}-:D3xl1]gl o5H*p:dɊv??Ɏ>K4t8U1'~]98-,W'Ѿv+z66R6CWIOҞ^$*a2А vR8= z i(Cu&baTV`!6NJ4䆫iȰo7Ohr'RՒ8$,j:ZNR!j>FҙgG CWx4($ >:԰v-p:OSS詾ZU_óOS| dYb*'^鬠W:ԓ7 mqJm[efЄ-kC/|doO`11.^%!*'i|OMe$ۯW2x6ASUܰ,ewyo ؔHY~m Zt խoj+>y mk n@* HYaX=*><ek (FQ9G00Jtm_'"=mXO:x<렿 [2 ŦẒ_:cc!8U.X^m!%KN֗Pav#}pt-V" r74s'҃ˌVKL(*ɪok 'SgDLjsW9JRSA~GK^O82]sbĿ"t}p|,S_ٛ\=q!WWL]f .0c 1aaɶ{B͔DwPK0I_RCl)D|I+{>," bUb?g:ξmRA6zL*weu4#&0Ւα((.E`xݓV"ꬨ,m)Skch ]!/WVDu$~zL995絼Bm,xCƔs2pG%L5FZYߡa詄yc'&yHRJy>KA :(6BjTUlg&e9o,s"ϟy0yʻT*k!=o!6O"ӢnLIYW!v1FE5Iv)kLldezKpQK`5:TFtwh6U YkVHMBG?3 A5efj:y =zI7^`Q#| B5q`.{[J,vTђ˔˳.I-vIqJ:}Y+'2 v[;o*B~W‘F𧬗^ͯTUX1Au4ogg{H)OG`<dz4,I?BFu2NHtKyUDdǺOLgAiyoq>бxdͮ*RZuUY4J͍)bc;Afe7͞/hY&஫.ANЈTB=46a s)=^7*HWnvb]B/3A] zn 8| X/\qقx1h/krv3}0g*48$G9 J1(_^Lb^*f;x_g#*\%Ku^ ,lTzNpPt@yNi͗brΚdőߺ@6wȽk %SH 9<&猫oGş ZtKI VN~$,|,;fjvڔ1(˭JB} 2c^r{!۰{i))koj%g9mh0pC:nJ52 }JfR6= J*o'Y~nm^;u.\Шl75Lr҅Jȳ\Ĕ"; :`TT,Տ1d?^@㵾:ۇ9s tUljo: #EGz` ĽѬ~p; ]s6wzwX ,>=y9@BAdnZߒ&~Sn$e*LIl iF4MQb^ww$yq7ٻy=bRf\%"IuSȞQB endstream endobj 460 0 obj << /Length1 1574 /Length2 8568 /Length3 0 /Length 9604 /Filter /FlateDecode >> stream xڍT\[.;]Ҹw 4H7xpw A4[\{Grܙ_u֑lWjj:* m6)kk< ebȨi 89y99t@PGb4:=+,>۩AewG_K@)/C0@d Pc(C@74:+̿>VL.!!?)'+ PWthC@@`B98<==-!LOtz [:crK zZGh+^9e 7.vw  l@@+yUv` mhy9Z~6%@^J`\Ynn %re9 O zn7_;x}6 "ݝ9t w&"lP''zYq -~ y.>|,=;Fh\\kh 'h~|W{\׿Le ;zrIȲUu//?x;%48Mo0=Lyf-pqZ=?ΰH ɻ;:Q3ԖN G I}5TЪANUZ>lBAn /jec4Gq>Zl\{/+홖TeVctuF{gzGk"8 D?  
8 ApX 8 p8@@.N99%Pn?sIz%+wW[/dVh s+P)2O n^́ M9`s˜y31kܥOfo2b m^`}3+vwνed%1=4ۘHԙl鄈[UVl!3\+R/k-=QVY@QZ>rjϘb*uZgLj}ʭQL.!bC0De6B^/ckl1DlYcl?fD.ED/Hj`w<̶vA2 K40@FUhz*TCB~C M #>Mly&_R>؈~jIw.:h?|!Tȷ"p4SAΧvQ`DWgiċM-׶A/N]1+]-)D0Ra =tVF~.l^*~xP5y3`?xF7挌>rNfY,-HmL3#ȐQ9kmz(zmR_wф|[S fZ5rxx* QoD*|db$CX&VUspbڭrLbe >BT<$j7̫eoB>,q9/Sd}9I{_&9M3BcU[BicFHri4{aNͦ]Wsc>,[dXڪF|P/7~v"7P`^hqOpv n ;,;#?l\T-}+y&i \F|{6@ZMYD c@X=l; ł_Jv=Nm 4Y@ ޵trnRUʻ2]&8KMo!+HIy߉aA-aoFD Wm2ڡwmX߃=`Pqf`js^tu!_сSPBfݛ9fe\DNlQHNGoWn:P!p{4a7*! ̺֗ro46y*Y-_xFw5}{>MN"IJxwx~v# :*TD_~ߑf{YV~XS/$L;\ZYz&pp휈2 iH,Lտ nDaJyki r=@9hoDPڞC~9VۂRXi]Qd51JN vZ=txॸ ^7nT83M+5z,WS d+5Rw%_gľJ]nU;o-b-4~ }̔eTP\ON3t.TȰzX-L,>A,v'Zs9>סr<;R`^i5UV@  +[!6d c]Aa$>b((XB<Mtz G?˯=ں^z&`ch|9bGRlmzWG@TۄAV{uF#Ru[M+Iw8nRB \1OROͫZA$bJ; vH/|l((q)Dy{v-' onD"Q^dƼ㕅$)gGJ4v}k,5OiR-v7/`&-#ErC݈3ec9`lڗҚn"dXWf H\jR /(Zto Yh;xmEX"!j aefT*➉G%(Ёq^6'[r_$_̏)m^ӂbmu[l]s9o+ax wIς?"w찮7x+5#$gSlo٪l%NS&5cDwY;Z,F g()6bR`;0R9 nܮ=o(a:Ϡ}į+l~&:e.f"^_es/#W<ۄh,,M<,ߺrV.JY&oY 'Kho?7o\vj3ϾH>WF.FA) WQՇd$7eھ8Mk^:kķ6EU Vn7LZc\<\'i_nDu[3ej{6AݙX/}G3')6*{T{>6(F%<6`y;>eeVG+S;{+YDqeW- J*5%gT;TD QӁܮ:kU59u=l)&'J|Q"i<[>'ao~zě?W=MM̒WUx]Ld^"=0 C I-:R]qJwE7X W ).yY~+;adh1A,"V^sbškdY/"WuZޅ=zca&m.BW%fP_{鯷ֽUh fS{9 Dr1; y~'b7(=U"%E'ʤ^ffS#6 ? AۚVWKi|X1%?gW`NiރKuaӐ矯\g2iju#;$ kZoe4)q^nsVM{ރs {zb+h$w UGM\ iA:K^B蘼\0MD0f᫰Iyͭ~A!mi !ߪZDmAyM>ijAE (`m#v7cـ{0[х0b8AM+bb' V zl)P":7Ͷq}ҍ` Ml[%Ȫ_NLYL xzSɣW-,So^nL ,Yr^"Őԧ4.@Dl_A,\yo#Dca‘Ir5JYˍ_˝zjvk՛s9vLO2 ' _;ֹ{;EN͐I_/4 4Jfp&rU +" ^oQo bU{@ Ikぢcwsbq0bkI@l;=|r'~RA?ECʠ(eՍZGCtirBʡZpCҫ9[J6޹z"h|%R0?fV 4laFDc[.sdRM`~UoS!1=c}4eM]`e+Gз7AkH( =yd7s[ O`w_2YX7d&p"c"Gn-!$V'< ۧr.}T6ёaM}cEppX/ij̿HooH*mŢ^DAFڍ+Th_diuڕ?ҺఠTrI#QB.\4h{(R҃H#|h\zc\OO!79.Zr71NXg$bKamaXsHK#\I{3> >&g,IJEn2 sPvΨ`sF}tݏ  hokdjuL$y<%Bb =$GcgW8E,W5osVvhlT54sldTlD7Ovr/䱓#~dJLUɭɱd%-x;`Dupn/\phnώB8jߌ50ZKdCڲnf2̀)}RulE +=BKZqIc`A[).0:Fء}0m̕H<:"qt[ Uj:ZVW׾99zAqH'dž~l}(&`72~b"[҆y6Z5T̅熩"Jg eFhmlz>vM9 ƺXٟ,H[*&=aov{2Vߡ$SH v1ܙCtJ F-Lvrg_6E^9tB}PAxmOfGCCѲ }lCRMk~jތ:Pтmjc9(6"sS  qf)a(+yIt)9&-JL}i7ܝu7';Zƻ?94;-( Z8F_C4嶽_{qf$F8KAܫ+d97yeyNGu"M 槺Vu߾_s[0R M0Xd ~' te-j9zdu>!s 6бe S5HzWS >x5.8cޅV |˴O$޲d21"JxOY|w!ɴ|ݫ]x<ۄ)2O,e7Xh>to?^r]Cs.xVL$8gBE0+H5C8Z_谀֌$_&sEVI]zZG Zo9hޕ+w[~ǤeBHC,ZtZ%بw`eFیOdRwJgJ _rKPNO OV?'xRc~$ԟ%ဩk%5҆i ߃^Pgq>^۷R{xY %}]]0Ӌ@- Ks"7Ubs󂒚G߇,ặp#3jK"#$30@/S(oZi*%768g̢@s1 5?Uj!tϸ/-^ jhiXpB]zS {MSc]PfAg|7|y̏ .ZKjVlɯnx~J$P+N8ާzf epLL +|zhYf=Rq|JפTNvZ ,}½2uIqGNx7Sb1grw<'8yRҔp\Y EK"]rѳεr7L_d#ZV~߽]&uQZ.mtIbQq+V媮_bIB(뼧Sy' #%Jq }eu2:ځiBw`{1\ԩܗjKB>ẘbiF/eDMd)4NF8Chi@8] !>3ywlr7E[%p8R\)5zիN"Jp{vM$k k21dzR?!t1Kb [6W/bdF̋/xrcw&֚=f|^;^6GުfUחK/Ԫa:&s1}QzfI{gc_}oAR`JX?J3aNgݍ)oۯ}5pIk~p*2>ȶ8]rSO+NK1`G7XgDh…$BFC3S~ *豆TqP Iwk:Ǟ|Bz*%?< ٥h\oѯ?~\I #v^c)3bL9ifWlf!qp"]mE.B@ ~)֠uy$4t"ls.(S38܌ <]{n20O!G_"wWM\ k gD_ ;?ius bMlZaŲ0}=%!4L8JT>$>YZGZ(uvvU ߨb`dUUhMi7@meSO I=CK V`-fRH%2b,*ԓBTx*2UwXyS 솵"Ҝͤ,a` endstream endobj 462 0 obj << /Length1 2537 /Length2 16607 /Length3 0 /Length 18084 /Filter /FlateDecode >> stream xڌvsxnlۙضƶ35Nc۶FӤѝ}v><3̵:Pэ `e`feeGҰq Pi]\m!#4u$M@N67??++\6Ef'G+Кxsؘ:Mݬ u'svssgad6upevrcxڸYԀ@௴JcFhX۸di6@GWPS(;-oF`cf2d/eSss'gSGoG+=,0uK oajcojWi1U)(djbjcW,ZBW|6.@sPYnlihaW*,6݁rЬn.VVV^Pc@/skhx;d ` Joc  zn.@2!,lf@+G?Ad1h \l!d!h,,Zj:Z '7[\ `bpmH?CU xA=3:Rr 0@g XXA_lS/װecwLK?2vwh)6[qw\97SZ9ZFdWi/dͿjQ`bce_<ЮہWt{r4wkع..by0;:T N. "߈"X$ ^`X 6 <(A ȃ߈dST,jEhA Zȃ߈o5u5u7/2h(l\rbȮh,!77TLȘ=h"/? ߐ?$@Q4,r;hR 6b;HEOP2 5 ? 
+vj _:Yr?Rv/6(?l1g}_md t݋O8A%swGW?f@0Gg@ T?B@Ԕ $ J(r"ڃ`P³+'̿MɖTчTO f À"8gnTGf${"="{IVK%,1Q2 !Iea52ʲ0=iz[=|Cl9Yݍa<$i1\ʌPguhxcqt-cU J!Uh@r4{ǽ\"}(2R-gY8@Y\^)\ς8r+ѳV'ab_7,% j|Ъ4aw_uX8KO$boBhbs+G$>{Pаm#5..d+ĦhkӪD,iW ,6_NtBxMffYuXb3V=?n1S|X/̟ũCFifƃ=D>#2Hi"a/&k!Zk:@CGAWG Lpcaމ D4מ8o&LȵqP.amAӟd4Chk] SᩓP y=3>:ŢezOWNַK2Ip&+Lw.r+NgDDB@Oi50\x5/`,2)zz4U %Q|aҊ3xm*\'h;P`HQ tmj0~>/*I5(/Z:}K;[vY` ` #X2z(mnbp2P!81 ӛŐ& K-mZB'4r>Qu'&1C=V(K孢 $2ƕ"E%a3wԩHۆ& P>F0R*ΗBikpxT:0'Ig7KZ{vr`}3N.ħuTQYЫ^n-Pc< WUT 28,O2G(ybKM(E4%"P[nQeشȍ Ja`(N PEpc$=[=7k:|oۏ#M^YLktpt1,aAC|Jas7L⡡wxY#RpM[bE*ZiAI ?Y=zx+g/o$o>0 >a LOH2>pre{\Tn=|zguEgC=CmVei#NE_? QK-Y봱dj*W1㒇t 92V'CkfX5tUo*ϔmr s.4/1q$>̕9rrPFz;ܚ 8e]I^B$PN8/JS}UeSnL#( ' z[x"t㴭q~ İ{4cj4͑6gW9 6X.o,C_~UқH0!B`JV╷ %()0>7AERԿ h17uczf>H)fĴP5].2/1r@MSc^w{t- "4SMh^-)G͞"++J%MziP4b_Ϙ;7>\@+r®3+5}izU{@eĂ5Dkwg(D~yǫ_pͻ_脾p,nEr GQ$>dCT]}qr6:k S Ϗq³m:q~Ǽ|%kqO{Q7JSS0'uoY$qvVUL|S gNԂN)Hvr:Ci=_PUkZ˵N7|9ta鞬[amYIc3>M >Jk#"D֮%O7d2y~/ ,BQR-h:e$IQY'+9"1ܙ FeC@Ɛr(ǙX<FCMHHon_^3yK ކxf@tןLQؗ=Z`X7B8ɆM|v㊍: 蔕m n3]K+=Q6ȶQ ||j+w"C ,Xo{` ;>+,VʜѲzE6BtGC+\:׻v8Q G/!jvޚz&E~Pփ 붕X^3" ?@Dn\[I>t>NBNuHr\E>w"Shbn9Yv]>սS{%VˁyN-V"ad834W(]/Eq<- T(4OVA#}]xZKuaZ6g6.ӛԭN6kǵX<ݕKѺ?[98DC$D;ܛm[3 O1wL {;t0_B^ՌZxsn;>aؖ7>y ,0%n+1kv]%("͑P<'5pf$ʏ%X{ՓG>bxoj&k`g?HViw*,o ZoOwRDUϯ/x;vC.:&nBmP~';Dk@s6Bawbilwer~-qh`0!d#/0>X{cc%# lhone,מ _W3.Lz9(I "bۉ$ͱ.iU{NB9P{n#t3젪M(6oFk񈖾3I|^PФZkk4uAnՍ9o8|I}*lB*Ո&:7ʕ3GF,`ʯ {ԣfsV#bf—1Bou*DWu+tٱi Aq3r6=%_X|18Ba.۶I 0ې5ՏE%1b(J^ 65V4.9(C wJH;|#=  ]=C=Iܱ/`3k*9n͚<Ȼ\ƛ@?4)b#0nB '{ZLNv@@QJIǂlsn]6 >!ʨ>+_)kK /;$Muk1_U]U %a~[|fQpna[r]Kq9^k{Z.?@ͯU%m8}:X~>i>#J_&mm&mʚv(.CPF8_rHqn wg 9*!誾V DJ~؁}$wvY)ŗGj6= %@N$`+N&@U1%G}su؋ ZLc> :NTr4`Bep-K5(~x'oWĝB5&L?y,wn(a[8٭v."_TN;igy"@|Iߗ`\3$+k+Ws2AM FwU12K,R2Cg Q%?*p:ϗW:F&[0xHОGͩ2DpZ(J՝o!r)fO'{{A{^)5z?ptB B lY;'ߴ.N0Ge$VʥhDf}^2]úE.GHjԋkԴ!2VzQ፫! Y1Ŷ jǞ#ROpHVLEͻC8XCj= ܏-3LD;c2t=1s*\+ PAa(s)>ʗE+t7T2s&g+ڃlρbeQ!N~Rz_ISk2p  Ra3.~fy_ll:258 H?| Qu3GX vw b /*1;AgjY!z9a2k<<t3 زess V<sO[PGH#,Mh<;t'U-[WSCOvd䯂̰f9w,Ոn/ĔY;eM+tGdH橄iMr-ܼ熯_"y> Op%KWx`'Lf(~} VrrՔ,׼)Fص^ ~\џN^zP9eu7Ci'FDXbua"t{I:Y5#dmPm`[y1|Eצo-i %>HЪgͷ \f0e/7ؑc& GtjC!yS.{顰J)啗nm\8e6~L"J]q $WA{O-̼/q02jCP*C/gCy iY} zF=+L;j!Hpk~ެat]¶FvfP:J]Fj< yRZѵ?RC$$o\+%< cN ,U]O"&[s?v8q:;?Éy0m}Xǩ{ IgQ54{>͂jup,x}і;8D{9`vI]CCO߽DMXŮasG&ȹaNi8gQ~sFx ׂØmrK*Iݓ9qX Yj,P!Oǀ4J\3tdb:gĺS&0(J}6R۵!$=-袍Q'5 A #2ޱ ЯȔttFIG5DAL3ua_.uc"/t^7fIr0/aXk+]Wc~ϢC= Q/u3Sy(ll = zKfs=/\0wCRy5vut6m 2/3T%,2 ` jhˀߟзga\`|QKqxG36>( [=nGi7|>o'XN81%n?ΓZwu<5ܺP6 (?R5i+΀zE Ee!n`~I$jb2 hLm s_1,݋=-/GOvTM]1Cյ&ٴǙ9G.fʵ~16sq]MXdT<1v\{U9D=\+ ;ӟP_yPO1J{82T\NB24 PyVYQD`ɕm~ FecAj'59ف qJOJ]aq곝pSUd52UmԎv$LQ5?W9$BGO[3r>:N0Ϣ'β[ 2y{pvT5Tÿyc)SuKNmۀ:zz٠8+BEXX,:oRL6I~62#cSvzt?)#KU]+Q!X+N;+En>^PqaLR_דЍ]3Kz&)t)Aͻ6Erԥ@N{ݺvɵj<>".4We]\^b.W) 4 ~eDJd68ي{p^nhӬ֗ԵAT/g|nm=6{A0LqA|KJY=U vqɜFu6K#ܱgo }>Bya=~XfH̃fx'GH PML>eod2yi]lWaZfYjDIČr!!IVTbQQ 9o상;V/Wjy̡oQq{k23 h|:a/^J f!_{̑ȨEClems^oN0PJjþ_8LE$W=D]}rK:)cߍfBKNծ4 ~, Y+sp!x2 Xxܣ=k"Ab'x%M\,7ʇNȆ3!ޠ$y+^5B쎶qu*%6٬ ^OwͰS 4kԳ5HMӦ>?PՍ8GZJ㊐:Hb .zM5zQyjx.vS]k#eMmrGHpWu.LIj`><;s]ҶW?_2^ a6c{y)*>7|ܯifj7IQ*%zeK1lwy\+X{?ϰXq24IIX]bgg6bx|[a{}):9|?G.`> y 9Yك툸 t/js9ŧZs:фD߻df%ʕs޷  ,H^d^|CvѾpZ̎  P*|^EԄD"VVWȃy$Il3 bCrЎɥg }F/&.R(U2leʖYX¶&NJb;qB> hr {T8]+t%OSTA6H)]>MrʒҺmlĄ~8Dhj$y`Zz42t_G T7 e(鋨(oUx{[&yWeV3LF\N y^9&?c7 p¸fO&8 ^~ǭ*),r h{8%r RA$2!FkJҔO{q'jJ+HnN8yRen<Zpmóe-Q;~ s4f!sP#jihB =-~yB&I QFs p58F`aIКLRB\RWc$u-ȬBȿux17raT$$f2ړ>+pJYCs8!82/C) fR*9U-wUΊ^)oOl h Qv r&ke,}#4p_bz%e76*؏J&8j;t+] #WY/ o/,ЈVc*eUi ~ݴn%xg?ٮѪj$BNE+d*s5r=@[K~2ZIWXX^ GD}&?,A{mKj'PhVb{ߙUnfSj! 
NwIYwbX[`UGIe7'* q tBPټLSH1WkӳjjUU}HJyx-'_ՕM5 hE/БxfMzds)M(eVa ^Qt)4G̖:{eU.GҞVkxo(-Z}oM4'Osoʔm:纳C=ͦ z#KF ~+ b]Vɡ` qừ@bLԉu`C Nꥱ[D ^K|(뻬I i.БX%[F*a519P(!bl/A9 G>84 Hkm6?b{ < %ByHb8_Z"4!m0=gd ۾-1{7"Vׂ=*W=ð;: (>vBs^hҍMM‘jQ='O~AsNQtBMp |hپCN0,f bRt~Fw :Y`qA+75l>1䚗l7\7Ն17#=+9A5vdBs[b.,,ha·C ʎ|)LUόMP`efs%Qգ`uU[JCj$/B̗PpFK;ͫ3Gp!TdH~n% i qEڱECخ9烋yE=*sfBZ$LM:V4n4; * |`SKBSJ'I*ͨ؆"d 0£mӧ3/ PgC/j2xlNwŤS~ӆ8Bf_UIiH&Vc>['M_uUy;}P"f>Tn`~`B{ ߪp2 m)lD(v#A_20U||`܏=Duj xKb ,'o)/t> n |2q$v5V7VdǸL5ۜ41'mXZ#;( "'Rl-D33VPsA2ƼE%ʙp9q 4H= oTw+AʄH{pg*mmjf]h"b&lՄNጢ \/SL=L}6l1_MRjPy-ݷ-$պ>tUvQF_-*<%|VxCkF:֮ >&GIFZ*rmm_gQ'^CوYXgN^R3Ʀ5\0$nbn`!=&^cZ0 =Ԝz8'()m5!-[1n:==;yH;'PVonU\}lqw#ׇqaUD'w0w +5kĸcqW)7kPW@a_c[aoa%6(.(N՛%hr>Z#>nwm KLG~̼=WHg)iB^qNr-DyRܐhj :[}މvy 4*)L3vLuYA;냐̮eq(vTL7$VT{'T@!gzCݜ_ s :-gCp A:BWRft/Ё-Y Y0j:y܄iiz idqK)rSZGLcex^Jwڅ-9MVw^"Ǘ1W? EF}n* v΋H!HzBR1's+7H m je54iX9`سEQt‚[o1? ~SҼɐ,=?gIU/k|_X8Et>} J3?apMDvW5uP.g}p)`o[5S&6SAU! U.u ~]Cs38aqzj:EoM}\k9pλ#ю%aq7(BQvie7%(g2+xF3%Zq/OR9xh{ـdzMVs%ıtFhH7&.W5<7YQYŭ!ße*zGe3[9dx7.[rI,$eSGp;R~90 u ԄDKYāSN\0;/#ըoTG~Vh1f{e_5;='Ioz6l\eF6{ Uo.Fa=e.8m{/{H}aaF`g.!Q1mԎL/HOwɠJ7M̉eY5˝(>_#(gK:*JԺ-Mmwr>Oo&?PO[< d+f,6%W-S gn#3V~ Y![lˏp\7@x;|¹cqmAc5nG$|U8ȍGWR'?,l?ԗ@u; mY(̎V\ZV| BmQoꒀj0ΧlU{HC J :N'+;W~}yj Oy5oٴO8>($xw(XZbrQAZxo{PB%QqފfNG58}=$N&Q/:knW6ܙYxrIw$)錑MhՋ401YqJpV& >#TK?J5\zz_/j6O&ӨH_.s-kߴ=(靣CyHy S\:Mbiycbxth,ߙEI JU7 L?(*0ԦT0F{t[D]!f\7{VS@܄bW/d!QYmtӁD-NV>Ϡėns;Qbh^ޔLJ8 g i8 8h;fӼ 9oT6OnJPыʙU3Kkqߝc{~idmerrkR#+oEm˸WV L-f'aߵ!k\^K fr2CiCjn(i~E,s@2Kd)G{^vJ ފŚgvt({Agl[oh"۠Z^An {θ+e|m1-n'cX<gq&gLp7,] *?Z;/\ՆnR+6Qy،܂#q}ĒY0w3$,y8*?TZtnY&ړD)!h]WKD뾏~G`=͘4efaMvR 4d,Ap3*W~T2M ?n êZVtF$ 9x$ZXG ?H2y@w:F.$R x!w[sO"doD/_'+,^FkgɏŸ(R[`QKw!#ڮt >{ LXliTB ^}rZ V+i USZJ~WDZw.QKU6~! bԳcuf]^mVSEiy.C ;k>k ]J덌Ka>P_WpU}//@~XrLvsjfE8\zFE=GzDV~Bk&lgiWP\+hT endstream endobj 464 0 obj << /Length1 1401 /Length2 6170 /Length3 0 /Length 7122 /Filter /FlateDecode >> stream xڍwT6RuܬxTT Wm ~7o?z 7͌+Mmœ*Oz.}o~n%bsTp]ڮ7Y{$bpԟkۼ5#x&Q6 MsA~7BoqK^ ىס#64cO0 kZ)_οjBJl@11c-\Vz #<곉9q),C_` =w"K}`dWjew|Uz_|&PEɋ+$ ~ڼ`~q1͆aM6z],$\ 5k>2OJ}I/؆II )z)~z%>kI6~i\L|/K+84 I _\[Y BC'>+hJ}RUjԹįCm`# DS*_޹GxoY|{$[b8H)᷁MN~ƶMeDZ6o{6X-͔}7u1q7pd=آFN k{]D8Bz0sZGzntm+a6Ş-o^YVa[5QXnr)/"c ď =[ZsV8[&7]:sONZ?;88Fpg6~%\~+]rڨӀr\ [8NUGpEM O/Ox~LLzTwڙ146{H4JV5rR-~.XRX٥9$[rK 9x32ۚᴉ8S*QQc:(`-hE4|͚&&#g.v,J)۞zRV16[QgmC״0%|u'c 6^k$IaT-bCM08J'UCYM Sxr,t]@xWy~f}%{`;ǻ4/m4 f<`TAĎ Ҽ3[d[w+Qn3 Ki7u%{s0/7D5Z7]&W ւ$]a3N/Z_Il>c:_z 'c2R ZdOb_kXueF+YիJF0a\,?9+ghh>| 7nWP]Vx&X\\K륤la5׆Aaamp gdִCW}St] .R1c䚜E"Ong̢9R#_xu\UY:{kgIAK&Zv| ]SڏڕĤiB IsM+N \'j;Ï$vn0N ?,ٿ>8ɑ1ӾE1̥A>#]7yJqKy1 / J'/Ned&I6 sswݟ/ =q_ O\2Gⴆz}0<9 E =eۦ2y.~`J̕Kw{n54uΜ֏6YX;+O#iMM^&vsx(AL8p}7 g9-v'Dة~$tƗ{NXMQ{T"*Y pqq|%3‡uڌ,tqWX*6M"hа[D}H|]e?uqcf?/yqso}q9oxl> Emɩ `3 ~e.<])*&(D=k5lYa}Mڬd}3 (dRIDꢣ>YQqulo<26'JWhT^I E& (mdݩ_,@ ϺS^>CLFmŒ%v3 e {B1< cT!{^J_u^R̴pή%9( ._a6tae:[Z;i֗oi-5?kJ`DPb:!1Mr4sՒ[L}ޝ($!՝/AńQ oU3Ce&-g[ncM#޺u>kd.U#y ˬQjU&ڢ{Ԁd:Ajh:{>PF2I&sJX}'Q$KH(AiafnM_Qv{-U~P27m ʺqy^:`T%̉7iqgUicc 8P.i{#n(fMR5AgᵳMA?72{zq.TOXVGOdY[2ݽcJ/j|2A>,K{?Ve.ikqк2UbX/S?Hk':oD`(C35Zg~^%X]z3ۮP몏O;Hɪ-U>%jؤV48۲D)a_Lz /?%v+- Bm~łB 1\&0 :vci8V071 9[GUf% -ձ,$ ~.RxKzR uRh| إy_UܹP+u7&eNþh3 >F}L$yF) w*QUty11V38 .<,7MJe}:T&r/Z0Mx, ȒW刧lJ75*i߅ WW"J ŗ#- ~M/ 0qlDdAcU]ʿvE&?&V0$f|A8LWsF:S e#x?U; ^Ƈypw C@ydch죅cm ~̛TIٞgny4`/gvIwvV#s0Yى{n\|JwsN,4/2tqTm"os&$ Ș9MnMȣ&jg&N?gZxW@^S"l1A%Z}<(BR>h&Sύ]Β`Za }SPcW.IX£L>Rv$wR3 &qfVc!!d.V~TFoBF}wd,(+XlHΨ;T%[ő%w6|%UvPNysy%Iw_ѯV-@bN3y\i{߹}c]~,N Qus I\0+tQJo\|e~sB`ёuzGWh iq-탉 [j˩nmdHX+){H4MQsl#"]gưBP[cCoŠg3b9#'( )&x?YVp5IM0\fyZZL,2xxe"X ~U5R .$|PwU+%~sJxevOGRjx&iLc$#`f^Bů2lIvGQ~ Uwo@7Ypy02ŸCJ"u1gf+T“߉N 
Y8%'6zVD`s1O#%zKmOyH>"%gu5bY<{>Gc_X<v^WСQ 38m#ۙ{Y 鞽Z˶ckv7JD2_(N7݋ %VJFBGPRZbf 3&yyWú]S_ Ǐh[`psy!8n_cWVDハV@?k6< /)jߩMt^]|Y˨%j\o^mSK2ٮi)$8’ڦc~LHS9[ϙʦ3Jo-^QѫI˧6y\cg9 g;gw`K]|煒HNf6Ûit .޾r64%JpH0HҮ L`k`];7fJj](zuʮ =?C; q2c_w~p%寮kG ROa xVu)AG* |ДM(ռpS8NJy,׏x2bLj!Vt2!Z1@Cc8 it(DshzAOĶla@eM9b˅}Wyq x5b34do-Љ::|[8[ _oSs&Wi_3s髌CӖϗH {?E FU](k-G=AiO=kT6eBVƙK&ҁ=\pRAECJ6K7<:q/x5Ǔm!8]ؤFC1NNb 1@,|CJ )B$n1:B<$aY}a4K{26E0!A^z{&TOeǺ\xm/p+]'n g%եW^*s9v.jTGhډQ![yl >7`g Eڹ$SfMryᖶ&eҟ5.TRvX}PJFXKNHlBP9.3ZoUz7m mM e ZL3>S`c|p>$O5t~_`+)3]-5/|3>GR__3s{3?wa~ endstream endobj 466 0 obj << /Length1 1381 /Length2 5936 /Length3 0 /Length 6878 /Filter /FlateDecode >> stream xڍwX>!C 0J;G+1ƀQ҈ Ғ]M8q<{:|E'o h>A~$PQ VA a~HnG;v؍a(w8!E ؔ h Rjx8%$A $*A<@m~s+"]}Pp{4_@N(PPBBw8PC!6sTB`$C+*) qqGex^pCyl@\``:vh/ Pば@PNgDp`tq |{UG{y/ xB w>ݡ(+ڝkH_i0UDhw(>{iGUw+ ˜ah(D܀0o>N_fHWf?Ce ecmECf۫IDA@APLTDzG: _Fճ4S$\Ab rCG Qs?Sw鿲dT<8D? OhA jC0[{$Z aazp4a78g8tz`B@adu113č\a%3Tc$+0IڰHl$~e-c^( U444fhQ3Ho-kl: Epd/>Y~Ϊ)p H*!1E{7 M,$rxEvf:*ŃM۶wc/ _sąΒ|5S5Kmu~ƌ=t` M͉4D zTs8a.GÄO!tHxd)B3gNOkJijH'&lF 嫡 /ҙ-X-?@@ 0$ ~LJˀ_XN)\JB훗,ݥy%Zb`6 _K T@%׳YFFf^9a?Es4RrJ]|0,~gyDpL XmgvW5jQ:&^QPO鄲wmN~ԧ),xϤˬ>JۨGZMTxطWEŢ7kh"Ljp_=xxI Ȫ]&e.~@ieI^8MƔ&LK>a+SIiheGO蛐jAvMOM1Q7aͬr8#o 58)b²83[] b$ʶ y9u}iy]3Pa)$JeXطqwdP'[M2/+KB)L^P",euPZO^煩OwayzIvb`oq_uߨOZ$($eJyj8%3pQXc6~v ټEh6 &ZsE)5_LG}*4>/Z 7Zdpuze1Mُw'oUn>).ZEв,%m=I@Hϊ7 Yd(O(w QOMO[Ac]7=|}<(dDSP7WUJ1@h7]$zT#wiT/Mpj޶oy#wTDiT$?L 󢂚y]a=2;ѧJԍU9Օ+L[@by g1V@#Ƀ2S%Jo,YgڭRrjvLE(aKL]7=[Fl.D4qÉ!P2QvMVg ~2yl=W=CH¸KkT`Z*akguDibA̋F-_83XXNHo6߭Y|Wdi.⑒RDcQ*PkIDU6 z5Sij.zjji_s~{qg~*qaA\>msy㵠 0ᚄķecl8ʃW(U2,8>XK'1~8sȸCRE꣠Wc @O"1Ss1jc5a R O+捖I +.m21)J}u{]4+fKnp}6(aNE,w2FSNvׂ/srX9Uf_hn0]|;qQ=]9}{]ijA5ys-́k0q93ȝ穂,A/8<³VdĴ2`5~-ާJ?X>dP$D q+M--LhY2)H- :W[9b Ӓ {\l~:sd~+£O^AuHAF#y=$ fzs2lWQo64.=Un&3GoUh, V.۷]dxmed4iO<ܩAMz+^^ |Ѫ4W7eu1;<2<&݌9|şp 3U{Vⷌ'RxIkxfZ<56=I!*k }84'=UcX"L<"-n Y[#3ɗz3' hAɳn$/k4eΪ6.IgE@ԺTKš~~8 0E-2X?Nyw[hea%3ntpոΏm\PE)kwlxWMEэPE9SBq+'F 'T}ȳdH.kq^Ys vByÌ6%qd>imܵBؽίVRG ,4w(Kd1$Tv|#cpR7',d,r 'gLO4\xžLyZʩIe  nGb&j!.z}ƛU(,h_--$0fDfocfaY)kMQ>JһOAɚ:/&iTGdSUn (6HVi>EkD {$UpYLgӄMȥ^;cc:ptA؍Kw/dݲ4C*Y͓ 󪓱TFz3 V26m*c0O➒@R'OH1} EVv_>n!,bUm͠0!ҾSksKSiRۀ/f dо5EFh@m7;ŰݼB_fIOAZ#|̈fY|$J<ߙa`6HV$els|2|g)mvMVˋ 2(ARIǟ ^*epm.;dB?_X^?㪍 QЦϹfJm ` FДM#On>ۢs?8Rng/'WI/I cv7;?7 /ް8F$Yn=Ͳ)="14\xt}ON~)?Sm&ueyR ̍R !\W4jZ97_IEN[ J~ -i|onQLYgCI|ѳBcŸ7X)9;VthvUfnUohMGUe5#/WmOr2 㟅h $i 'x;!ZK.l(ΰL\wNWi6ξ[!GS<ѐdG|E,[%Q:;GxjK]tх'w}6RY?/Rx~8Ǣ9JAdfv,ٽk@*'k40  * &o6EjLٶ#1hZabjc/ 7T3v5}L̅BR x2`0RPv%$,cםk[BRN Eh|YB@[xBHH{]yl.w2*mz\Kþ&ϭE? =eBUPz9u;D'm:/o-gbZ-8rۨbb?M<_ƖJ?Zg >:D尢hS`GbDMAb&*K˓4TKt*]]dXф5nߧ"R:ZZXDCZܔk}fkWJڼ1_ʎi=S$AJK7 /OoP'np◛z!_ukzÁ7_! Տ,Y,̈́!o(fytwt O_2Q } . -JY 5KfQ&Lwa!qe$.hlb7v٦';IjYàw)?$e3)vNKVw{RӗfS[OB-F&'_2?o472p8*r K:ؖ0G`2%itq` F:qE}N!~oZ,umо낵 {S׾ $H@dr"fK2HNWS SHEUKJ鿀f}urDv:V9 rny.[gD]| endstream endobj 468 0 obj << /Length1 1461 /Length2 6532 /Length3 0 /Length 7526 /Filter /FlateDecode >> stream xڍx4[wC:{c0at轅{ z] (In{oZ3g9>3kXu xv`e8 (h@ /(bA@8,`w& `[SE Zp@   Po"]hh0@#sC@0- \d A0_)%7q>>ooo^[W^47p=^`{ڶ? 9 o[w0 @! 0  4:n`_dͿ܀?O?ѿA`mA -s8@`&/ "B=x[/[I]-@YN`O wƒ#߯4mV+]]0ί!`r} r~ag<) !0G0 > '_ ;nd@a =o `!v`G H<w?'K0?Ggdl8>10/ ;-O+V U.r. 
߹H1PBwM忲?){B+T'9Zp,jktO!l sD*/_8Cׅ @N/׼A!0.AFCyx FпUM@H`n닃`<$:Nƛgsse;&l0Zᅿ&VyʱL݅ ǽy!uc‘`z{8Hf:Q[uHnms=+1/߽̌y1j0"Ph$B3EBo3O4kk"IHRȒ[]:HX@U1FAM6:Qy/zGl&Xd~@(Ț ߷Ϛ L͡1i=3A/u%fbKQm:G?&Uf /V5' +|"1vpZ/9x$eCCTĹtXc_eYARQ32 ۔n&=e {ʲ?Dp+#}`"?`w@[Mr(Zzz5 V "EDfM-B(i1!sɫ=?E!A"-^j4wzYUc9^ %yzN8Tcy].vm9K]+ʆlR{U?vЅ`$ؑ}Q#]qq}I¶ ,:{٪aĒ˺l /ǖ縇ʴddVS8E?PH-{kY n6:G}Jt8"[3,UpR^:|"Rtt&Q䟝&>;xfh1CMwyz+YKhj=Z [=G.:RUFOPϺpg[.9EY|dܿjXؙ} yzbI$ MOhhc KZ[a4;E]>\jX%2`*HIB1:"Z*-ecd'Ov2EL_TXaGOfp=́߻NFgWiW9xz@i]LYmqcb8Ƕ"lGfQMr"NӉ.A,`{vq7]WQຎEԚ_SiN9[6MIQcs5UK޷k|:;m$7}p ֻ, 1 Ydy |^s): {YXIٛ. KX+&Xr7(q_aٷ6j }&~b >zT\@?Jxz-Ekh_9b!ZKpIE8t ߲P"M\WvFܷXjH ߲{O&GcZR#*?O ͋~3m1X*5R\ FUcG7]ZhHkB@'To -P C|<&"tc8_(.(/U9D!LSG@ېƪ~cH1q`@OaN|HV*Փw TɧG osatlF+, 'fEb82'Ԟk4I| ysc_rͿۯ U<3G h:O$̺V0<|q"!qJB32-*oє92NEsb8Ǿr0^2^?MsoE;|oW8ۻKW/SMRG+mۛQ,,T!_^ӰD4ڙW߶h PJ5 \:*¦(*U^ѭ2KmO ~ɋ>0f"a1]C)nnʜU뗳uf'`"D3hba4߻;s9Ǘ5PRXmS%:Q9>ʥ8ipj.7-qvx>aΰ3?;iIsb_}䫕"ƢXCV>oFQW(19L> A VFʉC&]+\% D0*}7()s0+r9 H(D u ~ޘwu;9T7կeY }푧7>{|,TN,RP֚@HDqs읍}*ծ''V,O?jŠ*N:@~)`z' 2Ͻ^6_p;S\0ЄNMd_a_ICXةGdXi;sؖ⪞svrd訣A)y2 BKJ 99N@U~% /W0FݺIo {At5n5瘷y 9DEsmW%Mq y6׎^(l j4k{4xz'{tO]?y^SQh,^ѥ.SDd$WUY7XDXŞ~ lpt%f7SIڼ~(a3G%7*Ū*jZwذt>7 f'2n'Xәm,R}CQ* N O-ZFw goߗHg?y&P>&A(ǠDk.EpOuA'^=gAK&yZ˂2+N,PIy1$EfaI`3܏)3%I]U)`4,>h G;7x"5m{&EMl{BW6s=N^bFν|9X~}zBf^Xn*Az'ti͆FgBy|]>[~d|}-7Y~0bo,Hxm5HlGhΫ!сT,Â8kw5#Bw̉QquM=O?Tn,gbjym;S Wx&`r-',PߙPF,$SGƽL9oM1זt~}=_CF"8Vs<8^<"r<FczNq_6l\au`eH#FO#KOȼSQ1ޛm;j/yWR Pi4+T̴MWz xv_My[\i`3wPp(R#"brRRTх:fZya™^NEN a4!ݻkL{,7+6g>%DQ Ne̅(#P\ xL̉&)Z.F7>yko>oV0_n{[O(c5cƘ68>;( K1Iݎ:>溨x)텬mφx-%˿u\MB!CFϻ+)VէQ̯~O+h&dy]Jܺ)|C)Uz<2Y`'%ZCݠ{q?[ zU^m煋wiغ|5\LjwJ^1eP ͵;>hR{wl7ͽE ES`z4ap5Br?|R'K "t5XQf~FMZV=˶]k:(q9 ˋ p9x) /cm r],u 'D4lX|hQ`Uz?T&=a:7ߧql:ҪXW>-,L$JQf7_=(Ǧ\^ųmU@GP|QPİ3 XUc nwR/W N9Cծy#c0oǮbc ƌhҐ$#ӒjMel[[:A򏁷=xڗ]ѪE; ?8!51pOv$0-ۮ3vo](eo"kS_R9vu3,Q^.] s BRxROJS'>OE~isYm7L^-6s, Ku|ս+X0[a-FzF4il4 b~SJ [4<Qy"uFeSu{7`4h37)jS+<>aCacy4ZoXl}wO]`m@ W:Cu-:zDz2n=lRB6Nٞ1kk0jFkW'5٥ N{sW4.00zFHũGjr_Tb;r*/-qa uN)/G7ּi4L JM-m9R'fL?2z]DzY*?3y0mF!r9?, ^#.ɸzעXƶ5ITDEhhS" endstream endobj 470 0 obj << /Length1 1394 /Length2 5993 /Length3 0 /Length 6951 /Filter /FlateDecode >> stream xڍuTo6RictJ#- ۀlS@AAZBQPJx{=;gקu}nh"G?@Fp¢ PMR`17) ,_jԡ8>qwED`,/G4F@ Ю__@>?PTVVZw8PA (>pWA&hG >UNDu{E~!'4F`k` d 7pP 0 pG 8D[h@q {7@Q .P7eC:#@8/r:cx }w9x[w<, taAXE~߲ vqApXԑ "6B{|(ݯ!"f(;B[ G`Y)) Y 9Jom'uEC v @qw6‘0';F9㗏Az!` y ;{((r?4x597ã9K^ϊqUGJYEd|aQG:V6X=Dz[~ tg׫uyˍ=gJˆ؛ktq6xbIIu(CVc?h~QY[RJ3ϳoﻋo5x:f= w迚z[U-^y͏MDS wL] {v鞈lFaPpy{sebBfH:UhZ"TI||T_ | jwx-Ƴ;,0$!QKQAfC\log;j՟D|+3$Dsf㧏5~Д@ܷPsQ:gDIeQ JEkI!ӡ%qSnjK siT>b}u1'Ur9q @yZv^ 57uiߪ4PEsg!Q|D~0[ek-/m~D,BCK4]8|>Y.<{o"g[As4'*&LB!xF0㙆{)מM9\3RXb .ӷa\I|W-He[A+F, ̵py)]ߨfP:.).7h'S^)&j]βye*؛ءn'w%O{ŠEiԛ#:SL>μL֊`7(k6,}7gR%hf*h9qۜ/O+ŸWɵu?]|QOm3 yGŅ"MZ2υ~؄F[ V0#\p#fەALU,WwS39~=&Y;ei.arQoeÛ="հ}ĥoec< N&{݌+r5 Vc; ŐD(9J6; _xTKY8'jtr!3Ab|QS甹^Nmnp##O@#]2&I KC~+L?tDOW|}i:,H!rCStv݄0<Ns]"s,2%pWTڄ'Qί{ D]{5 o!6<ǗTaupEOv25@&NbV4O2ٕ7I93 4⥽Σhe|nqZQΒ6/AEY|K]IQ秛ȟBmHZٟPEQ0.7qwAD X!.w~-6>ݲ8a_rEvyY,$#)C9+re3,0oi4C|Ê gМn\2Q,u X(I-qD-R姅7G#oc'%*|o&Оϟa,k-$W~f&I*5 LR^,cxF(܋^x)>+w4zZAGi2հ"mMv{ -Q9FA4 | O_ i&eCP!,jzgj *j}'ځw>G;,Թf"?T٢ %GZ2(ܤ03#w)t5EqB5mOq<4(FsEϭ˶ <y>y1SΥg\pW'/s`PpTR[T]i](// %&Qwd]jeYΖҵ(R~]bNy[^bn褼.A=T^b:z<҈"WէK/_,&~{ RYt]aS.ȟ@d@f.ARl!MJწͨ/QYǏ]b4>$x8)Sn'չUhL'*mfG( Fa"LC Ǎ71r"<,UYOqr" 7]/ v#gd7C,Tr yɏ"b^'*oACJ8g@>o!Ms%!++]B^ ]Yhh7sg}, wɗqy.&+w. 
ڴʭM KMl C9qZhC qb~~R'=VSJY`'9L8+,amti.5nQroJ';7 gO[(},f`hct1%w9|T YhhtV5X5T+Tƽdoq&%M H{0 э-~i}dcR]دjysCL-׏%YMc0 t&X[G~(eZVR讉(v:+DfTV eJz{DPhg9 @vnҨPzC)}ZST t2$g9z0wx)]ho^NX5iCUH;}սYqH@A9Ъwv]Euh,`YeП{Kl|/3MeO1Xh A+ iot6u9e<:n3A.uڛ]eDxON\ ѻ%^`^ΙDCܳU/=VZ<ת랸3AgYͥZd'%&n*i}Y!QT[eLߎIoMR p/g0?[jcJbL68E`7Ig^ ړiTep/*+t̃Q2|7%r8PU zsVywvl˂V"H ^(- >0oS][]sM='(8LSѤu,f3@OɃnh1 X0SJ$S:ᇗGVk{y}yg `4,t7TU}km%6gr_0oy|˷ciZ!^U~: V/T,P@*w^-`_L,Gq{hE%{>i#R\Tn/Ԅ$J\xqYoG-U꘼Ӑ9TjF'[H6lг%)&9-q+'m6R~hoڲQr} j ^C{~V8ki1> stream xڌP\ր4Iph5www=wd{*X1*2f&@){V&>,+ J9 q'˻LPsXXXXl,,1wHYr 3{<hM܌DNV %=-@ ?!h,]\ݙ휙,V.53 he߭1!P4,P7wq7vV@+ xPU(;A2V#߇`ebo{ 1 d0\<\ m݌lM . % 0~9:Y9839[#_aޏYd&nog8#U=}6 {w dfWf̚ +GWĿmEd@' zZ2@/{6V?n@+%VV haB] 4߿Xكl=}Jrjr: nJ11{vN6N++[ʂ*Sۿg BXJ t=N_oSWAI"je]߷Ad>X9X8%rXZkj%kl@@{g0^,,Gd6o[|ߡ+ 27k8NNƞwNo4z=f&˻ G__ `K/0!n0K!^,X,8r=zϮ޳+J%|* |=zϠ;/{v?ni_|י۾_$Iuf0J_J?BzK,-ޱ%w?U{?fQ{#WZ~OM9{py8߫wǘRg[cgps8X:q:>ݹ߻gL]?~9/O; hhod]P#Jay*N{OP5@\=?rYg q 㔭F`'-gdgCY@;EmFdr۱uK}9,MO&$HX<v$NoUbs]W"g RǏicl;CeVi''7NR䍦sT{}xϱ7A_hMՅ)3ede,Zd S ŷb/E=Lhv %J"V4 9i˥1f~1e(Ԏh8'eΥ2e`6_{/ȏE:}b$S ˊ^5r,GA"C|&9* 5.k2A:| V )~zh@{,P:l`/<MR)CI1^aSU7>uRmQ,BZ3]Pz0<ڟVoɶ)oĚqS"Iac\}7#Իn)o)/딣IVTO6+ع\ K&|1\?uT'vRL( 3`\͍#K!5;O.OO]0MS:PY; AgHK9yF@}y'(d4& u(3حtk]$;Lc`YYU,F Ţ:dUiz@U)Uޝ><7GP 7JXdYArn*Q*?N1"XG+>ȴrJZD__?c{L-):S7 s9R .5P%Fϗc::Ku=kQeq=%mnH@9j34M*I=,_\å\?Q넝4 Z qFmiU#uc-giQ]CԸ_o_1,)r(`5"r]I,2~*]%&]Zl9r_ϴµ|@D3+֯j*-/̀4 TH/84uQJ%o(.驗 (U+D$Tua)!`\LiKk*D|  ZkNEH]@𥳐>& .ɽh,G(9?!?5Nw+\+r}&YaY1g<&IۼI m5%Sc샙znW{9)Fg|pYړs9XDK 'N^f}v[OR>ˀ~](^Pj 3+@8} eLK (zW}L"Kkz},'c>KlD'NVOoS%iQqWYզ=T*H>b^EaMåeDrnZс¤d**vjk`U>aTgE{zFdR=X|]<_ ދjϸfc=q}E0Hq{B=~3v+^J/aH(׌clg`X~\TtjTM6xVKs`Iα3$U?"O8'7ϿI|jԫTc~2"]zۮZ3Ĭ`V]*v]O({yN${)z[iIZ gԆ?B'3D-A״)f[ޏv)b,5԰(V8]nr uc+|n0RK_V~vr3a3w%>헉bjդ%O]3Y :M$Vi M/yT[~hM0a!*@157[,=`d!H [eAP') ̐u "@Lu-i@!󿙖u\=g_+ >`u$XZ[7% !bze{V݇c냎TZ\OF˦/ @YOڎFa93{;&QH D c_MA),#Jz`LU Гa_ ?oOu;τmɱ5z3>hXJN0;]bXl I18%/ԕcA*@/sC}WC v4QI)E`|iO4qFl3^D(*r V6yMt5֒=:Cn4)+wij&:12ݐnsWB:)!Wx.n]W}@&$l~v2ᬾ Up|6y~~Sپ,2<= EI#( V1zv]>6pw#h}{h4fd"߸cRFݕ,{<ٻ0O+uq/NG`B* Y5SNvA '5iNGG)nEU>_=Ewn"-o[ I"!eJ-0ARf:pq#~GOR+2"41!g`sQnn5tdB}Wa@h\WR-,ܾ9b%)>]5 ;a!5?# [IYE?yc{{7 .39`vtSR"aXɒ sjC\^v7jN%N//5"⡴.Moj,E!. 
[Binary content omitted: Flate-compressed PDF object and font streams belonging to the pre-built package vignette ("Fast and Elegant Numerical Linear Algebra Using the RcppEigen Package" by Douglas Bates and Dirk Eddelbuettel, produced with pdfTeX and hyperref). These compressed streams are not reproducible as text.]
-۝mjuB\mu;,{X;YB-gE'۲;ZH}S`زKufq0 MlS=o1c:tDul?ӯq\n:к;`W@> e -]i1r'%fm' 5i<efZ\fg8l-40@C=`l:ϴ4SV[_48pIԄ0{M4իs22Ɯc`u׽C@SO#m h-w7{/-` e fFuˀC5pR`FP`MphJQ]3{J;?\&Cp[`ۊO5 jܱPHAcZ)#ցf]@#e|:"|0"Տ< t({UP;UT5jF)c{zx'-W˭5*/+_(ܗXُF-ۊ,4h/MXNE=ESEVhVZݻ.Y#EsV_EVxLт33.ZqeDђ* )oTϤ'Ҟ&IiҞ'ҞH{"ISH{"퉴'ҞH{"퉴'ҞH{"퉴^+ԝYoJh endstream endobj startxref 387012 %%EOF RcppEigen/inst/doc/RcppEigen-Introduction.R0000644000176200001440000000072712647727164020354 0ustar liggesusers### R code from vignette source 'RcppEigen-Introduction.Rnw' ################################################### ### code chunk number 1: RcppEigen-Introduction.Rnw:8-13 ################################################### pkgVersion <- packageDescription("RcppEigen")$Version pkgDate <- packageDescription("RcppEigen")$Date prettyDate <- format(Sys.Date(), "%B %e, %Y") #require("RcppEigen") #eigenVersion <- paste(unlist(.Call("eigen_version", FALSE)), collapse=".") RcppEigen/inst/doc/unitTests/0000755000176200001440000000000012253717461015662 5ustar liggesusersRcppEigen/inst/doc/unitTests/RcppEigen-unitTests.Rnw0000644000176200001440000000000012253717461022214 0ustar liggesusersRcppEigen/inst/doc/unitTests/RcppEigen-unitTests.R0000644000176200001440000000150712253717461021664 0ustar liggesuserspkg <- "RcppEigen" # load this package require( pkg, character.only = TRUE ) #load RUnit runit <- "RUnit" ; require( runit, character.only = TRUE ) if( file.exists( "unitTests-results" ) ){ unlink("unitTests-results", recursive = TRUE ) } dir.create( "unitTests-results" ) path <- system.file("unitTests", package = pkg) testSuite <- defineTestSuite(name=paste(pkg, "unit testing"), dirs = path) tests <- runTestSuite(testSuite) printHTMLProtocol(tests, fileName= sprintf( "unitTests-results/%s-unitTests.html" , pkg ) ) printTextProtocol(tests, fileName= sprintf( "unitTests-results/%s-unitTests.txt" , pkg ) ) if( file.exists( "/tmp" ) ){ file.copy( sprintf( "unitTests-results/%s-unitTests.txt" , pkg ) , "/tmp", overwrite = TRUE ) file.copy( sprintf( "unitTests-results/%s-unitTests.html", pkg ) , "/tmp", overwrite = TRUE ) } RcppEigen/inst/unitTests/0000755000176200001440000000000012647572761015126 5ustar liggesusersRcppEigen/inst/unitTests/runit.fastLm.R0000644000176200001440000000621612565743640017637 0ustar liggesusers#!/usr/bin/r -t # # Copyright (C) 2011 - 2015 Douglas Bates, Dirk Eddelbuettel and Romain Francois # # This file is part of RcppEigen # # RcppEigen is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # RcppEigen is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with RcppEigen. If not, see . 
.setUp <- function(){ suppressMessages(require(datasets)) suppressMessages(require(RcppEigen)) } test.fastLm <- function() { data(trees, package="datasets") flm0 <- .Call("RcppEigen_fastLm_Impl", cbind(1, log(trees$Girth)), log(trees$Volume), 0L, PACKAGE="RcppEigen") flm1 <- .Call("RcppEigen_fastLm_Impl", cbind(1, log(trees$Girth)), log(trees$Volume), 1L, PACKAGE="RcppEigen") flm2 <- .Call("RcppEigen_fastLm_Impl", cbind(1, log(trees$Girth)), log(trees$Volume), 2L, PACKAGE="RcppEigen") flm3 <- .Call("RcppEigen_fastLm_Impl", cbind(1, log(trees$Girth)), log(trees$Volume), 3L, PACKAGE="RcppEigen") flm4 <- .Call("RcppEigen_fastLm_Impl", cbind(1, log(trees$Girth)), log(trees$Volume), 4L, PACKAGE="RcppEigen") flm5 <- .Call("RcppEigen_fastLm_Impl", cbind(1, log(trees$Girth)), log(trees$Volume), 5L, PACKAGE="RcppEigen") fit <- lm(log(Volume) ~ log(Girth), data=trees) fitCoef <- unname(coef(fit)) fitStdErr <- unname(coef(summary(fit))[, "Std. Error", drop = TRUE]) checkEquals(flm0$coefficients, fitCoef, msg="fastLm0.coef") checkEquals(flm0$se, fitStdErr, msg="fastLm0.stderr") checkEquals(flm1$coefficients, fitCoef, msg="fastLm1.coef") checkEquals(flm1$se, fitStdErr, msg="fastLm1.stderr") checkEquals(flm2$coefficients, fitCoef, msg="fastLm2.coef") checkEquals(flm2$se, fitStdErr, msg="fastLm2.stderr") checkEquals(flm3$coefficients, fitCoef, msg="fastLm3.coef") checkEquals(flm3$se, fitStdErr, msg="fastLm3.stderr") checkEquals(flm4$coefficients, fitCoef, msg="fastLm0.coef") checkEquals(flm4$se, fitStdErr, msg="fastLm0.stderr") checkEquals(flm5$coefficients, fitCoef, msg="fastLm0.coef") checkEquals(flm5$se, fitStdErr, msg="fastLm0.stderr") } test.fastLm.formula <- function() { data(trees, package="datasets") flm <- fastLm(log(Volume) ~ log(Girth), data=trees) fit <- lm(log(Volume) ~ log(Girth), data=trees) checkEquals(flm$coefficients, coef(fit), msg="fastLm.formula.coef") checkEquals(as.numeric(flm$se), as.numeric(coef(summary(fit))[,2]), msg="fastLm.formula.stderr") } RcppEigen/inst/unitTests/runit.transform.R0000644000176200001440000000547712446006401020415 0ustar liggesusers# # Copyright (C) 2012 - 2013 Douglas Bates, Dirk Eddelbuettel and Romain Francois # # This file is part of RcppEigen. # # RcppEigen is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # RcppEigen is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Rcpp. If not, see . 
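As a quick, hedged illustration of the internal entry point exercised by the fastLm unit tests in runit.fastLm.R above: the sketch below is not part of the package sources; it calls the same `RcppEigen_fastLm_Impl` routine with two of the integer method codes on simulated data and checks the coefficients against `lm()`. The simulated data and object names are illustrative only, and an installed RcppEigen is assumed.

## Illustrative sketch only -- not part of the package test files.
suppressMessages(require(RcppEigen))
set.seed(123)
X <- cbind(1, rnorm(50L))                 # simple two-column model matrix
y <- 2 + 3 * X[, 2] + rnorm(50L)
flm0 <- .Call("RcppEigen_fastLm_Impl", X, y, 0L, PACKAGE = "RcppEigen")
flm2 <- .Call("RcppEigen_fastLm_Impl", X, y, 2L, PACKAGE = "RcppEigen")
fit  <- lm(y ~ X[, 2])
all.equal(flm0$coefficients, unname(coef(fit)))   # should be TRUE
all.equal(flm2$coefficients, unname(coef(fit)))   # should be TRUE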
incl <- ' typedef Eigen::ArrayXd Ar1; typedef Eigen::Map MAr1; typedef Eigen::ArrayXXd Ar2; typedef Eigen::Map MAr2; typedef Eigen::MatrixXd Mat; typedef Eigen::Map MMat; typedef Eigen::VectorXd Vec; typedef Eigen::Map MVec; ' definitions <- list( "ar1_unbounded" = list(signature(x_="numeric"), ' MAr1 x(as(x_)); return List::create(Named("abs", x.abs()), Named("abs2", x.abs2()), Named("exp", x.exp()), Named("cos", x.cos())); '), "ar2_unbounded" = list(signature(X_="matrix"), ' MAr2 X(as(X_)); return List::create(Named("abs", X.abs()), Named("abs2", X.abs2()), Named("exp", X.exp()), Named("cos", X.cos())); ') ) .setUp <- function() { suppressMessages(require(inline)) suppressMessages(require(RcppEigen)) cxxargs <- ifelse(Rcpp:::capabilities()[["initializer lists"]], "-std=c++0x","") tests <- ".rcppeigen.trans" if( ! exists( tests, globalenv() )) { fun <- RcppEigen:::compile_unit_tests(definitions, includes=incl, cxxargs = cxxargs) names(fun) <- names(definitions) assign(tests, fun, globalenv()) } } test.transformationAr1 <- function() { set.seed(1234321) x <- rnorm(10L) res <- .rcppeigen.trans$ar1_unbounded(x) checkEquals(res$abs, abs(x)) checkEquals(res$abs2, x * x) checkEquals(res$exp, exp(x)) checkEquals(res$cos, cos(x)) } test.transformationAr2 <- function() { set.seed(1234321) X <- matrix(rnorm(100L), nrow = 10, ncol = 10) res <- .rcppeigen.trans$ar2_unbounded(X) checkEquals(res$abs, abs(X)) checkEquals(res$abs2, X * X) checkEquals(res$exp, exp(X)) checkEquals(res$cos, cos(X)) } RcppEigen/inst/unitTests/runit.solutions.R0000644000176200001440000000642712254726265020453 0ustar liggesusers# # Copyright (C) 2012 - 2013 Douglas Bates, Dirk Eddelbuettel and Romain Francois # # This file is part of RcppEigen. # # RcppEigen is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # RcppEigen is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Rcpp. If not, see . incl <- ' typedef Eigen::ArrayXd Ar1; typedef Eigen::Map MAr1; typedef Eigen::ArrayXXd Ar2; typedef Eigen::Map MAr2; typedef Eigen::MatrixXd Mat; typedef Eigen::Map MMat; typedef Eigen::VectorXd Vec; typedef Eigen::Map MVec; typedef Eigen::PartialPivLU PPLU; typedef Eigen::ColPivHouseholderQR CPQR; ' definitions <- list( "dense_PPLU" = list(signature(A_="matrix", b_="numeric"), ' MMat A(as(A_)); MVec b(as(b_)); PPLU lu(A); Mat Ainv(lu.inverse()); Vec x(lu.solve(b)); return List::create(Named("A", A), Named("Ainv", Ainv), Named("b", b), Named("x", x)); '), "dense_CPQR" = list(signature(A_="matrix", b_="numeric"), ' MMat A(as(A_)); MVec b(as(b_)); CPQR qr(A); Mat Ainv(qr.inverse()); Vec x(qr.solve(b)); return List::create(Named("Ainv", Ainv), Named("x", x)); ') ) .setUp <- function() { suppressMessages(require(inline)) suppressMessages(require(RcppEigen)) cxxargs <- ifelse(Rcpp:::capabilities()[["initializer lists"]], "-std=c++0x","") tests <- ".rcppeigen.solve" if( ! 
exists( tests, globalenv() )) { fun <- RcppEigen:::compile_unit_tests(definitions, includes=incl, cxxargs = cxxargs) names(fun) <- names(definitions) assign( tests, fun, globalenv() ) } } test.smallDense <- function() { A <- matrix(c(1,2,3,4), nrow=2L) B <- matrix(c(5,6,7,8), nrow=2L) b <- c(1,1) ## solutions to dense systems res <- .rcppeigen.solve$dense_PPLU(A, b) checkEquals(res$Ainv, solve(A)) checkEquals(res$x, solve(A, b)) res <- .rcppeigen.solve$dense_CPQR(A, b) checkEquals(res$Ainv, solve(A)) checkEquals(res$x, solve(A, b)) } test.largeDense <- function() { set.seed(1234321) N <- 100L AA <- matrix(rnorm(N * N), nrow=N) bb <- rnorm(N) res <- .rcppeigen.solve$dense_PPLU(AA, bb) checkEquals(res$Ainv, solve(AA)) checkEquals(res$x, solve(AA, bb)) res <- .rcppeigen.solve$dense_CPQR(AA, bb) checkEquals(res$Ainv, solve(AA)) checkEquals(res$x, solve(AA, bb)) } RcppEigen/inst/unitTests/runit.wrap.R0000644000176200001440000003105512446006401017342 0ustar liggesusers# # Copyright (C) 2012 - 2013 Douglas Bates, Dirk Eddelbuettel and Romain Francois # # This file is part of RcppEigen. # # RcppEigen is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # RcppEigen is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Rcpp. If not, see . incl <- ' // double typedef Eigen::ArrayXd Ar1; typedef Eigen::Map MAr1; typedef Eigen::ArrayXXd Ar2; typedef Eigen::Map MAr2; typedef Eigen::MatrixXd Mat; typedef Eigen::Map MMat; typedef Eigen::VectorXd Vec; typedef Eigen::Map MVec; // integer typedef Eigen::ArrayXi iAr1; typedef Eigen::Map MiAr1; typedef Eigen::ArrayXXi iAr2; typedef Eigen::Map MiAr2; typedef Eigen::MatrixXi iMat; typedef Eigen::Map MiMat; typedef Eigen::VectorXi iVec; typedef Eigen::Map MiVec; // unsigned integer typedef Eigen::Array uiAr1; typedef Eigen::Map MuiAr1; typedef Eigen::Array uiAr2; typedef Eigen::Map MuiAr2; typedef Eigen::Matrix uiMat; typedef Eigen::Map MuiMat; typedef Eigen::Matrix uiVec; typedef Eigen::Map MuiVec; // float typedef Eigen::ArrayXf fAr1; typedef Eigen::Map MfAr1; typedef Eigen::ArrayXXf fAr2; typedef Eigen::Map MfAr2; typedef Eigen::MatrixXf fMat; typedef Eigen::Map MfMat; typedef Eigen::VectorXf fVec; typedef Eigen::Map MfVec; // complex double typedef Eigen::ArrayXcd cdAr1; typedef Eigen::Map McdAr1; typedef Eigen::ArrayXXcd cdAr2; typedef Eigen::Map McdAr2; typedef Eigen::MatrixXcd cdMat; typedef Eigen::Map McdMat; typedef Eigen::VectorXcd cdVec; typedef Eigen::Map McdVec; ' definitions <- list( "wrap_vectors" = list(signature(), ' List vecs = List::create( _["Vec"] = cdVec::Zero(5), _["Vec"] = Vec::Zero(5), _["Vec"] = fVec::Zero(5), _["Vec"] = iVec::Zero(5), _["Vec"] = uiVec::Zero(5) ); // A VectorX behaves as a matrix with one column but is converted to // a vector object in R, not a matrix of one column. The distinction is // that VectorX objects are defined at compile time to have one column, // whereas a MatrixX has a dynamic number of columns that is set to 1 // during execution of the code. A MatrixX object can be resized to have // a different number of columns. A VectorX object cannot. 
List cols = List::create( _["Col"] = cdMat::Zero(5, 1), _["Col"] = Mat::Zero(5, 1), _["Col"] = fMat::Zero(5, 1), _["Col"] = iMat::Zero(5, 1), _["Col"] = uiMat::Zero(5, 1) ); List rows = List::create( _["Row"] = Eigen::RowVectorXcd::Zero(5), _["Row"] = Eigen::RowVectorXd::Zero(5), _["Row"] = Eigen::RowVectorXf::Zero(5), _["Row"] = Eigen::RowVectorXi::Zero(5), _["Row"] = Eigen::Matrix::Zero(5) ); List matrices = List::create( _["Mat"] = cdMat::Identity(3, 3), _["Mat"] = Mat::Identity(3, 3), _["Mat"] = fMat::Identity(3, 3), _["Mat"] = iMat::Identity(3, 3), _["Mat"] = uiMat::Identity(3, 3) ); // ArrayXX objects have the same structure as matrices but allow // componentwise arithmetic. A * B is matrix multiplication for // matrices and componentwise multiplication for arrays. List arrays2 = List::create( _["Arr2"] = cdAr2::Zero(3, 3), _["Arr2"] = Ar2::Zero(3, 3), _["Arr2"] = fAr2::Zero(3, 3), _["Arr2"] = iAr2::Zero(3, 3), _["Arr2"] = uiAr2::Zero(3, 3) ); // ArrayX objects have the same structure as VectorX objects // but allow componentwise arithmetic, including functions like exp, log, // sqrt, ... List arrays1 = List::create( _["Arr1"] = cdAr1::Zero(5), _["Arr1"] = Ar1::Zero(5), _["Arr1"] = fAr1::Zero(5), _["Arr1"] = iAr1::Zero(5), _["Arr1"] = uiAr1::Zero(5) ); List operations = List::create( _["Op_seq"] = Eigen::ArrayXd::LinSpaced(6, 1, 10), // arguments are length.out, start, end _["Op_log"] = Eigen::ArrayXd::LinSpaced(6, 1, 10).log(), _["Op_exp"] = Eigen::ArrayXd::LinSpaced(6, 1, 10).exp(), _["Op_sqrt"] = Eigen::ArrayXd::LinSpaced(6, 1, 10).sqrt(), _["Op_cos"] = Eigen::ArrayXd::LinSpaced(6, 1, 10).cos() ); List output = List::create( _["vectors : VectorX"] = vecs, _["matrices : MatrixX"] = matrices, _["rows : RowVectorX"] = rows, _["columns : MatrixX"] = cols, _["arrays2d : ArrayXX"] = arrays2, _["arrays1d : ArrayX"] = arrays1, _["operations : ArrayXd"] = operations ); return output; '), "as_Vec" = list(signature(input_ = "list"), ' List input(input_) ; // Column vector iVec m1 = input[0] ; /* implicit as */ Vec m2 = input[1] ; /* implicit as */ uiVec m3 = input[0] ; /* implicit as */ fVec m4 = input[1] ; /* implicit as */ // Row vector Eigen::Matrix m5 = input[0] ; /* implicit as */ Eigen::Matrix m6 = input[1] ; /* implicit as */ Eigen::Matrix m7 = input[0] ; /* implicit as */ Eigen::Matrix m8 = input[1] ; /* implicit as */ // Mapped vector MiVec m9 = input[0] ; /* implicit as */ MVec m10 = input[1] ; /* implicit as */ List res = List::create(m1.sum(), m2.sum(), m3.sum(), m4.sum(), m5.sum(), m6.sum(), m7.sum(), m8.sum(), m9.sum(), m10.sum()); return res ; '), "as_Array" = list(signature(input_ = "list"), ' List input(input_) ; // Column array iAr1 m1 = input[0] ; /* implicit as */ Ar1 m2 = input[1] ; /* implicit as */ uiAr1 m3 = input[0] ; /* implicit as */ fAr1 m4 = input[1] ; /* implicit as */ // Row array Eigen::Array m5 = input[0] ; /* implicit as */ Eigen::Array m6 = input[1] ; /* implicit as */ Eigen::Array m7 = input[0] ; /* implicit as */ Eigen::Array m8 = input[1] ; /* implicit as */ // Mapped array MiAr1 m9 = input[0] ; /* implicit as */ MAr1 m10 = input[1] ; /* implicit as */ List res = List::create(m1.sum(), m2.sum(), m3.sum(), m4.sum(), m5.sum(), m6.sum(), m7.sum(), m8.sum(), m9.sum(), m10.sum()); return res ; '), "as_Mat" = list(signature(input_ = "list"), ' List input(input_) ; // Copy to matrix iMat m1 = input[0] ; /* implicit as */ Mat m2 = input[1] ; /* implicit as */ uiMat m3 = input[0] ; /* implicit as */ fMat m4 = input[1] ; /* implicit as */ // Mapped matrix MiMat m5 
= input[0] ; /* implicit as */ MMat m6 = input[1] ; /* implicit as */ List res = List::create(m1.sum(), m2.sum(), m3.sum(), m4.sum(), m5.sum(), m6.sum()); return res ; '), "as_Array2D" = list(signature(input_ = "list"), ' List input(input_) ; // Copy to 2D array iAr2 m1 = input[0] ; /* implicit as */ Ar2 m2 = input[1] ; /* implicit as */ uiAr2 m3 = input[0] ; /* implicit as */ fAr2 m4 = input[1] ; /* implicit as */ // Mapped 2D array MiAr2 m5 = input[0] ; /* implicit as */ MAr2 m6 = input[1] ; /* implicit as */ List res = List::create(m1.sum(), m2.sum(), m3.sum(), m4.sum(), m5.sum(), m6.sum()); return res ; ') ) .setUp <- function() { suppressMessages(require(inline)) suppressMessages(require(RcppEigen)) cxxargs <- ifelse(Rcpp:::capabilities()[["initializer lists"]], "-std=c++0x","") tests <- ".rcppeigen.wrap" if( ! exists( tests, globalenv() )) { fun <- RcppEigen:::compile_unit_tests(definitions, includes=incl, cxxargs = cxxargs) names(fun) <- names(definitions) assign(tests, fun, globalenv()) } } test.wrapVectors <- function() { res <- .rcppeigen.wrap$wrap_vectors() checkEquals(res[[1]][[1]], complex(5)) checkEquals(res[[1]][[2]], double(5)) checkEquals(res[[1]][[3]], double(5)) checkEquals(res[[1]][[4]], integer(5)) checkEquals(res[[1]][[5]], integer(5)) checkEquals(res[[2]][[1]], (1+0i) * diag(nr=3L)) checkEquals(res[[2]][[2]], diag(nr=3L)) checkEquals(res[[2]][[3]], diag(nr=3L)) checkEquals(res[[2]][[4]], matrix(as.integer((diag(nr=3L))),nr=3L)) checkEquals(res[[2]][[5]], matrix(as.integer((diag(nr=3L))),nr=3L)) checkEquals(res[[3]][[1]], matrix(complex(5), nr=1L)) checkEquals(res[[3]][[2]], matrix(numeric(5), nr=1L)) checkEquals(res[[3]][[3]], matrix(numeric(5), nr=1L)) checkEquals(res[[3]][[4]], matrix(integer(5), nr=1L)) checkEquals(res[[3]][[5]], matrix(integer(5), nr=1L)) checkEquals(res[[4]][[1]], as.matrix(complex(5))) checkEquals(res[[4]][[2]], as.matrix(numeric(5))) checkEquals(res[[4]][[3]], as.matrix(numeric(5))) checkEquals(res[[4]][[4]], as.matrix(integer(5))) checkEquals(res[[4]][[5]], as.matrix(integer(5))) checkEquals(res[[5]][[1]], matrix(complex(9L), nc=3L)) checkEquals(res[[5]][[2]], matrix(numeric(9L), nc=3L)) checkEquals(res[[5]][[3]], matrix(numeric(9L), nc=3L)) checkEquals(res[[5]][[4]], matrix(integer(9L), nc=3L)) checkEquals(res[[5]][[5]], matrix(integer(9L), nc=3L)) checkEquals(res[[6]][[1]], complex(5)) checkEquals(res[[6]][[2]], double(5)) checkEquals(res[[6]][[3]], double(5)) checkEquals(res[[6]][[4]], integer(5)) checkEquals(res[[6]][[5]], integer(5)) oneTen <- seq(1, 10, length.out=6L) checkEquals(res[[7]][[1]], oneTen) checkEquals(res[[7]][[2]], log(oneTen)) checkEquals(res[[7]][[3]], exp(oneTen)) checkEquals(res[[7]][[4]], sqrt(oneTen)) checkEquals(res[[7]][[5]], cos(oneTen)) } test.asVec <- function() { res <- .rcppeigen.wrap$as_Vec(list(1:10, as.numeric(1:10))) checkEquals(unlist(res), rep.int(55, 10L)) } test.asArray <- function() { res <- .rcppeigen.wrap$as_Array(list(1:10, as.numeric(1:10))) checkEquals(unlist(res), rep.int(55, 10L)) } test.asMat <- function() { integer_mat <- matrix(as.integer(diag(nrow = 5L))) numeric_mat <- diag(nrow = 5L) res <- .rcppeigen.wrap$as_Mat(list(integer_mat, numeric_mat)) checkEquals(unlist(res), rep.int(5, 6L)) } test.asArray2D <- function() { integer_mat <- matrix(as.integer(diag(nrow = 5L))) numeric_mat <- diag(nrow = 5L) res <- .rcppeigen.wrap$as_Array2D(list(integer_mat, numeric_mat)) checkEquals(unlist(res), rep.int(5, 6L)) } 
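The comments in the wrap_vectors definition above make two distinctions that are easy to miss: an Eigen::VectorXd is returned to R as a plain numeric vector while a one-column Eigen::MatrixXd is returned as an n-by-1 matrix, and operator* is a matrix product for Matrix types but a componentwise product for Array types. The sketch below, which is not part of runit.wrap.R, compiles a small function with inline (the same mechanism the tests use) to show both behaviours; the function name and the values are illustrative, and the inline and RcppEigen packages are assumed to be installed.

## Illustrative sketch only -- not part of the package test files.
suppressMessages(require(inline))
suppressMessages(require(RcppEigen))
fx <- cxxfunction( , '
    Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(3, 1, 3);  // 1 2 3
    Eigen::MatrixXd col = v;                                   // same data, but a one-column matrix
    Eigen::MatrixXd m = Eigen::MatrixXd::Constant(2, 2, 2.0);
    Eigen::MatrixXd mprod = m * m;                             // matrix product: every entry is 8
    Eigen::ArrayXXd a = m.array();
    Eigen::ArrayXXd aprod = a * a;                             // componentwise product: every entry is 4
    return List::create(_["vec"]   = v,      // comes back as an R numeric vector of length 3
                        _["col"]   = col,    // comes back as a 3 x 1 R matrix
                        _["mprod"] = mprod,
                        _["aprod"] = aprod);
', plugin = "RcppEigen")
str(fx())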
RcppEigen/inst/unitTests/runit.RcppEigen.R0000644000176200001440000002546612253717461020271 0ustar liggesusers#!/usr/bin/r -t # # Copyright (C) 2011 Douglas Bates, Dirk Eddelbuettel and Romain Francois # # This file is part of RcppEigen # # RcppEigen is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # RcppEigen is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with RcppEigen. If not, see . .setUp <- function(){ suppressMessages(require(inline)) } test.wrap.R <- function(){ fx <- cxxfunction( , ' List vecs = List::create( _["Vec"] = Eigen::VectorXcd::Zero(5), _["Vec"] = Eigen::VectorXd::Zero(5), _["Vec"] = Eigen::VectorXf::Zero(5), _["Vec"] = Eigen::VectorXi::Zero(5) ); // A VectorX behaves as a matrix with one column but is converted to // a vector object in R, not a matrix of one column. The distinction is // that VectorX objects are defined at compile time to have one column, // whereas a MatrixX has a dynamic number of columns that is set to 1 // during execution of the code. A MatrixX object can be resized to have // a different number of columns. A VectorX object cannot. List cols = List::create( _["Col"] = Eigen::MatrixXcd::Zero(5, 1), _["Col"] = Eigen::MatrixXd::Zero(5, 1), _["Col"] = Eigen::MatrixXf::Zero(5, 1), _["Col"] = Eigen::MatrixXi::Zero(5, 1) ); List rows = List::create( _["Row"] = Eigen::RowVectorXcd::Zero(5), _["Row"] = Eigen::RowVectorXd::Zero(5), _["Row"] = Eigen::RowVectorXf::Zero(5), _["Row"] = Eigen::RowVectorXi::Zero(5) ); List matrices = List::create( _["Mat"] = Eigen::MatrixXcd::Identity(3, 3), _["Mat"] = Eigen::MatrixXd::Identity(3, 3), _["Mat"] = Eigen::MatrixXf::Identity(3, 3), _["Mat"] = Eigen::MatrixXi::Identity(3, 3) ); // ArrayXX objects have the same structure as matrices but allow // componentwise arithmetic. A * B is matrix multiplication for // matrices and componentwise multiplication for arrays. List arrays2 = List::create( _["Arr2"] = Eigen::ArrayXXcd::Zero(3, 3), _["Arr2"] = Eigen::ArrayXXd::Zero(3, 3), _["Arr2"] = Eigen::ArrayXXf::Zero(3, 3), _["Arr2"] = Eigen::ArrayXXi::Zero(3, 3) ); // ArrayX objects have the same structure as VectorX objects // but allow componentwise arithmetic, including functions like exp, log, // sqrt, ... 
List arrays1 = List::create( _["Arr1"] = Eigen::ArrayXcd::Zero(5), _["Arr1"] = Eigen::ArrayXd::Zero(5), _["Arr1"] = Eigen::ArrayXf::Zero(5), _["Arr1"] = Eigen::ArrayXi::Zero(5) ); List operations = List::create( _["Op_seq"] = Eigen::ArrayXd::LinSpaced(6, 1, 10), // arguments are length.out, start, end _["Op_log"] = Eigen::ArrayXd::LinSpaced(6, 1, 10).log(), _["Op_exp"] = Eigen::ArrayXd::LinSpaced(6, 1, 10).exp(), _["Op_sqrt"] = Eigen::ArrayXd::LinSpaced(6, 1, 10).sqrt(), _["Op_cos"] = Eigen::ArrayXd::LinSpaced(6, 1, 10).cos() ); List output = List::create( _["vectors : VectorX"] = vecs, _["matrices : MatrixX"] = matrices, _["rows : RowVectorX"] = rows, _["columns : MatrixX"] = cols, _["arrays2d : ArrayXX"] = arrays2, _["arrays1d : ArrayX"] = arrays1, _["operations : ArrayXd"] = operations ); return output ; ' , plugin = "RcppEigen" ) res <- fx() checkEquals( res[["vectors : VectorX"]][["Vec"]], complex(5), msg = "VectorXcd::Zero(5)") checkEquals( res[["vectors : VectorX"]][["Vec"]], double(5), msg = "VectorXd::Zero(5)") checkEquals( res[["vectors : VectorX"]][["Vec"]], double(5), msg = "VectorXf::Zero(5)") checkEquals( res[["vectors : VectorX"]][["Vec"]], integer(5), msg = "VectorXi::Zero(5)") checkEquals( res[["matrices : MatrixX"]][["Mat"]], (1+0i) * diag(nr=3L), msg = "MatrixXcd::Identity(3,3)") checkEquals( res[["matrices : MatrixX"]][["Mat"]], diag(nr=3L), msg = "MatrixXd::Identity(3,3)") checkEquals( res[["matrices : MatrixX"]][["Mat"]], diag(nr=3L), msg = "MatrixXf::Identity(3,3)") checkEquals( res[["matrices : MatrixX"]][["Mat"]], matrix(as.integer((diag(nr=3L))),nr=3L), msg = "MatrixXi::Identity(3,3)") checkEquals( res[["rows : RowVectorX"]][["Row"]], matrix(complex(5), nr=1L), msg = "RowVectorXcd::Zero(5)") checkEquals( res[["rows : RowVectorX"]][["Row"]], matrix(numeric(5), nr=1L), msg = "RowVectorXd::Zero(5)") checkEquals( res[["rows : RowVectorX"]][["Row"]], matrix(numeric(5), nr=1L), msg = "RowVectorXf::Zero(5)") checkEquals( res[["rows : RowVectorX"]][["Row"]], matrix(integer(5), nr=1L), msg = "RowVectorXi::Zero(5)") checkEquals( res[["columns : MatrixX"]][["Col"]], as.matrix(complex(5)), msg = "MatrixXcd::Zero(5, 1)") checkEquals( res[["columns : MatrixX"]][["Col"]], as.matrix(numeric(5)), msg = "MatrixXd::Zero(5, 1)") checkEquals( res[["columns : MatrixX"]][["Col"]], as.matrix(numeric(5)), msg = "MatrixXf::Zero(5, 1)") checkEquals( res[["columns : MatrixX"]][["Col"]], as.matrix(integer(5)), msg = "MatrixXi::Zero(5, 1)") checkEquals( res[["arrays2d : ArrayXX"]][["Arr2"]], matrix(complex(9L), nc=3L), msg = "ArrayXXcd::Zero(3,3)") checkEquals( res[["arrays2d : ArrayXX"]][["Arr2"]], matrix(numeric(9L), nc=3L), msg = "ArrayXXd::Zero(3,3)") checkEquals( res[["arrays2d : ArrayXX"]][["Arr2"]], matrix(numeric(9L), nc=3L), msg = "ArrayXXf::Zero(3,3)") checkEquals( res[["arrays2d : ArrayXX"]][["Arr2"]], matrix(integer(9L), nc=3L), msg = "ArrayXXi::Zero(3,3)") checkEquals( res[["arrays1d : ArrayX"]][["Arr1"]], complex(5), msg = "ArrayXcd::Zero(5)") checkEquals( res[["arrays1d : ArrayX"]][["Arr1"]], double(5), msg = "ArrayXd::Zero(5)") checkEquals( res[["arrays1d : ArrayX"]][["Arr1"]], double(5), msg = "ArrayXf::Zero(5)") checkEquals( res[["arrays1d : ArrayX"]][["Arr1"]], integer(5), msg = "ArrayXi::Zero(5)") oneTen <- seq(1, 10, length.out=6L) checkEquals( res[["operations : ArrayXd"]][["Op_seq"]], oneTen, msg = "Op_seq") checkEquals( res[["operations : ArrayXd"]][["Op_log"]], log(oneTen), msg = "Op_log") checkEquals( res[["operations : ArrayXd"]][["Op_exp"]], exp(oneTen), msg = 
"Op_exp") checkEquals( res[["operations : ArrayXd"]][["Op_sqrt"]], sqrt(oneTen), msg = "Op_sqrt") checkEquals( res[["operations : ArrayXd"]][["Op_cos"]], cos(oneTen), msg = "Op_cos") } test.as.Vec <- function(){ fx <- cxxfunction( signature(input_ = "list" ) , ' List input(input_) ; Eigen::VectorXi m1 = input[0] ; /* implicit as */ Eigen::VectorXd m2 = input[1] ; /* implicit as */ Eigen::Matrix m3 = input[0] ; /* implicit as */ Eigen::VectorXf m4 = input[1] ; /* implicit as */ List res = List::create(m1.sum(), m2.sum(), m3.sum(), m4.sum()); return res ; ', plugin = "RcppEigen" ) res <- fx( list( 1:10, as.numeric(1:10) ) ) checkEquals( unlist( res ), rep(55.0, 4 ), msg = "as" ) } test.as.MVec <- function(){ fx <- cxxfunction( signature(input_ = "list" ) , ' List input(input_) ; const Eigen::Map m1 = input[0] ; // maps share storage and do not allow conversion const Eigen::Map m2 = input[1] ; List res = List::create(m1.sum(), m2.sum()); return res ; ', plugin = "RcppEigen" ) res <- fx( list( 1:10, as.numeric(1:10) ) ) checkEquals( unlist( res ), rep(55.0, 2 ), msg = "as" ) } test.as.MMat <- function(){ fx <- cxxfunction( signature(input_ = "list" ) , ' List input(input_) ; const Eigen::Map m1 = input[0]; // maps share storage and do not allow conversion const Eigen::Map m2 = input[1] ; // FIXME: Write a version of as specifically for complex matrices. // const Eigen::Map m3 = input[2] ; List res = List::create(m1.sum(), m2.sum());//, m3.sum()); return res ; ', plugin = "RcppEigen" ) integer_mat <- matrix(as.integer(diag(nr=4L)), nc=4L) numeric_mat <- diag(nr=5L) complex_mat <- (1+0i) * diag(nr=5L) res <- fx(list(integer_mat, numeric_mat, complex_mat)) checkEquals(unlist(res), c(4L, 5)#, 5+0i) , msg = "as" ) } test.as.MSpMat <- function() { suppressMessages(require("Matrix")) data("KNex", package = "Matrix") fx <- cxxfunction( signature(input_ = "list"), ' List input(input_) ; const Eigen::MappedSparseMatrix m1 = input[0]; // maps share storage and do not allow conversion List res = List::create(_["nnz"] = m1.nonZeros(), _["nr"] = m1.rows(), _["nc"] = m1.cols(), _["inSz"] = m1.innerSize(), _["outSz"] = m1.outerSize(), _["sum"] = m1.sum()); return res ; ', plugin = "RcppEigen" ) KNX <- KNex[[1]] res <- fx(KNex) checkEquals(unname(unlist(res)), c(nnzero(KNX), nrow(KNX), ncol(KNX), nrow(KNX), ncol(KNX), sum(KNX@x)), msg = "as") } test.as.SpMat <- function() { suppressMessages(require("Matrix")) data("KNex", package = "Matrix") fx <- cxxfunction( signature(input_ = "list"), ' List input(input_) ; const Eigen::SparseMatrix m1 = input[0]; List res = List::create(_["nnz"] = m1.nonZeros(), _["nr"] = m1.rows(), _["nc"] = m1.cols(), _["inSz"] = m1.innerSize(), _["outSz"] = m1.outerSize(), _["sum"] = m1.sum()); return res ; ', plugin = "RcppEigen" ) KNX <- KNex[[1]] res <- fx(KNex) checkEquals(unname(unlist(res)), c(nnzero(KNX), nrow(KNX), ncol(KNX), nrow(KNX), ncol(KNX), sum(KNX@x)), msg = "as") } RcppEigen/inst/unitTests/runTests.R0000644000176200001440000000700712253717461017073 0ustar liggesusers pkg <- "RcppEigen" if ( ! 
require( "inline", character.only = TRUE, quietly = TRUE ) ){ stop( "The inline package is required to run RcppEigen unit tests" ) } if ( compareVersion( packageDescription( "inline" )[["Version"]], "0.3.5" ) < 0 ){ stop( "RcppEigen unit tests need at least the version 0.3.5 of inline" ) } if (require("RUnit", quietly = TRUE)) { is_local <- function(){ if ( exists( "argv", globalenv() ) && "--local" %in% argv ) return(TRUE) if ( "--local" %in% commandArgs(TRUE) ) return(TRUE) FALSE } if ( is_local() ) path <- getwd() library(package=pkg, character.only = TRUE) if (!(exists("path") && file.exists(path))) path <- system.file("unitTests", package = pkg) ## --- Testing --- ## Define tests testSuite <- defineTestSuite(name=paste(pkg, "unit testing"), dirs = path) if (interactive()) { cat("Now have RUnit Test Suite 'testSuite' for package '", pkg, "' :\n", sep='') str(testSuite) cat('', "Consider doing", "\t tests <- runTestSuite(testSuite)", "\nand later", "\t printTextProtocol(tests)", '', sep="\n") } else { ## run from shell / Rscript / R CMD Batch / ... ## Run tests <- runTestSuite(testSuite) output <- NULL process_args <- function(argv){ if ( !is.null(argv) && length(argv) > 0 ){ rx <- "^--output=(.*)$" g <- grep( rx, argv, value = TRUE ) if ( length(g) ){ sub( rx, "\\1", g[1L] ) } } } # R CMD check uses this if ( exists( "RcppEigen.unit.test.output.dir", globalenv() ) ){ output <- RcppEigen.unit.test.output.dir } else { ## give a chance to the user to customize where he/she wants ## the unit tests results to be stored with the --output= command ## line argument if ( exists( "argv", globalenv() ) ){ ## littler output <- process_args(argv) } else { ## Rscript output <- process_args(commandArgs(TRUE)) } } if( is.null(output) ) { # if it did not work, use parent dir output <- ".." # as BDR does not want /tmp to be used } ## Print results output.txt <- file.path( output, sprintf("%s-unitTests.txt", pkg)) output.html <- file.path( output, sprintf("%s-unitTests.html", pkg)) printTextProtocol(tests, fileName=output.txt) message( sprintf( "saving txt unit test report to '%s'", output.txt ) ) ## Print HTML version to a file ## printHTMLProtocol has problems on Mac OS X if (Sys.info()["sysname"] != "Darwin"){ message( sprintf( "saving html unit test report to '%s'", output.html ) ) printHTMLProtocol(tests, fileName=output.html) } ## stop() if there are any failures i.e. FALSE to unit test. ## This will cause R CMD check to return error and stop err <- getErrors(tests) if ( (err$nFail + err$nErr) > 0) { stop( sprintf( "unit test problems: %d failures, %d errors", err$nFail, err$nErr) ) } else { success <- err$nTestFunc - err$nFail - err$nErr - err$nDeactivated cat( sprintf( "%d / %d\n", success, err$nTestFunc ) ) } } } else { cat("R package 'RUnit' cannot be loaded -- no unit tests run\n", "for package", pkg,"\n") } RcppEigen/inst/unitTests/runit.sparse.R0000644000176200001440000001556012647572760017714 0ustar liggesusers#!/usr/bin/r -t # # Copyright (C) 2011 Douglas Bates, Dirk Eddelbuettel and Romain Francois # # This file is part of RcppEigen # # RcppEigen is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # RcppEigen is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with RcppEigen. If not, see . .setUp <- function(){ suppressMessages(require(inline)) } test.wrapSparse.double.R <- function(){ fx <- cxxfunction( , ' Eigen::SparseMatrix mm(9,3); mm.reserve(9); for (int j = 0; j < 3; ++j) { mm.startVec(j); for (int i = 3 * j; i < 3 * (j + 1); ++i) mm.insertBack(i, j) = 1.; } mm.finalize(); return wrap(mm); ' , plugin = "RcppEigen" ) res <- fx() rr <- Matrix::t(as(gl(3,3), "sparseMatrix")) colnames(rr) <- NULL checkEquals( res, rr, msg = "wrap >") } test.wrapSparse.double.ColMajor.R <- function(){ fx <- cxxfunction( , ' Eigen::SparseMatrix mm(9,3); mm.reserve(9); for (int j = 0; j < 3; ++j) { mm.startVec(j); for (int i = 3 * j; i < 3 * (j + 1); ++i) mm.insertBack(i, j) = 1.; } mm.finalize(); return wrap(mm); ' , plugin = "RcppEigen" ) res <- fx() rr <- Matrix::t(as(gl(3,3), "sparseMatrix")) colnames(rr) <- NULL checkEquals( res, rr, msg = "wrap >") } ## test.wrapSparse.int.ColMajor.R <- function(){ ## classes not yet exported from Matrix ## fx <- cxxfunction( , ' ## Eigen::SparseMatrix mm(9,3); ## mm.reserve(9); ## for (int j = 0; j < 3; ++j) { ## mm.startVec(j); ## for (int i = 3 * j; i < 3 * (j + 1); ++i) ## mm.insertBack(i, j) = 1; ## } ## mm.finalize(); ## return wrap(mm); ## ' , plugin = "RcppEigen" ) ## #res <- fx() ## #rr <- Matrix::t(as(gl(3,3), "sparseMatrix")) ## #colnames(rr) <- NULL ## #checkEquals( res, rr, msg = "wrap >") ## checkException( fx(), msg = "wrap >" ) ## } test.wrapSparse.double.RowMajor.R <- function(){ fx <- cxxfunction( , ' Eigen::SparseMatrix mm(9,3); mm.reserve(9); for (int irow = 0; irow < 9; ++irow) { mm.startVec(irow); mm.insertBack(irow, irow / 3) = static_cast( 9 - irow ); } mm.finalize(); return wrap(mm); ' , plugin = "RcppEigen" ) res <- fx() rr <- new( "dgRMatrix", j=rep(0L:2L, each=3), p=0L:9L, x=as.numeric(9:1), Dim=c(9L,3L) ) colnames(rr) <- NULL checkEquals( res, rr, msg = "wrap >") } ## test.wrapSparse.int.RowMajor.R <- function(){ ## fx <- cxxfunction( , ' ## Eigen::SparseMatrix mm(9,3); ## mm.reserve(9); ## for (int irow = 0; irow < 9; ++irow) { ## mm.startVec(irow); ## mm.insertBack(irow, irow / 3) = 9 - irow; ## } ## mm.finalize(); ## return wrap(mm); ## ' , plugin = "RcppEigen" ) ## #res <- fx() ## #rr <- new( "igRMatrix", j=rep(0L:2L, each=3), p=0L:9L, x=9L:1L, Dim=c(9L,3L) ) ## #colnames(rr) <- NULL ## #checkEquals( res, rr, msg = "wrap >") ## checkException( fx(), msg = "wrap >" ) ## } test.asSparse.double.ColMajor.R <- function(){ fx <- cxxfunction( sig=signature(R_mm="dgCMatrix"), ' Eigen::SparseMatrix mm = Rcpp::as >( R_mm ); return wrap(mm); ' , plugin = "RcppEigen" ) rr <- Matrix::t(as(gl(3,3), "sparseMatrix")) colnames(rr) <- NULL res <- fx( R_mm = rr ) checkEquals( res, rr, msg = "as >") } test.asMappedSparse.double.ColMajor.R <- function(){ fx <- cxxfunction( sig=signature(R_mm="dgCMatrix"), ' typedef Eigen::MappedSparseMatrix MapMat; MapMat mm = Rcpp::as( R_mm ); return wrap(mm); ' , plugin = "RcppEigen" ) rr <- Matrix::t(as(gl(3,3), "sparseMatrix")) colnames(rr) <- NULL res <- fx( R_mm = rr ) checkEquals( res, rr, msg = "as >") } test.asSparse.double.RowMajor.R <- function(){ fx <- cxxfunction( sig=signature(R_mm="dgRMatrix"), ' Eigen::SparseMatrix mm = Rcpp::as >( R_mm ); return wrap(mm); ' , plugin = "RcppEigen" ) rr <- new( "dgRMatrix", j=rep(0L:2L, each=3), p=0L:9L, x=as.numeric(9:1), Dim=c(9L,3L) ) colnames(rr) <- NULL res <- fx( R_mm = rr ) checkEquals( res, 
rr, msg = "as >") } test.asMappedSparse.double.RowMajor.R <- function(){ fx <- cxxfunction( sig=signature(R_mm="dgRMatrix"), ' typedef Eigen::MappedSparseMatrix MapMat; MapMat mm = Rcpp::as( R_mm ); return wrap(mm); ' , plugin = "RcppEigen" ) rr <- new( "dgRMatrix", j=rep(0L:2L, each=3), p=0L:9L, x=as.numeric(9:1), Dim=c(9L,3L) ) colnames(rr) <- NULL res <- fx( R_mm = rr ) checkEquals( res, rr, msg = "as >") } test.sparseCholesky.R <- function() { suppressMessages(require("Matrix", character.only=TRUE)) data("KNex", package = "Matrix") fx <- cxxfunction( signature(input_ = "list"), ' using Eigen::VectorXd; using Eigen::MatrixXd; using Eigen::Lower; using Eigen::Map; using Eigen::MappedSparseMatrix; using Eigen::SparseMatrix; using Eigen::SimplicialLDLT; using Eigen::Success; List input(input_); const MappedSparseMatrix m1 = input[0]; const Map v1 = input[1]; SparseMatrix m2(m1.cols(), m1.cols()); m2.selfadjointView().rankUpdate(m1.adjoint()); SimplicialLDLT > ff(m2); VectorXd res = ff.solve(m1.adjoint() * v1); return List::create(_["res"] = res, _["rows"] = ff.rows(), _["cols"] = ff.cols()); ', plugin = "RcppEigen") rr <- fx(KNex) checkEquals(rr[[1]], as.vector(solve(crossprod(KNex[[1]]), crossprod(KNex[[1]], KNex[[2]])), mode="numeric"), "Cholmod solution") } RcppEigen/inst/skeleton/0000755000176200001440000000000012472772267014747 5ustar liggesusersRcppEigen/inst/skeleton/Makevars0000644000176200001440000000050612472772267016444 0ustar liggesusers## With Rcpp 0.11.0 and later, we no longer need to set PKG_LIBS as there is ## no user-facing library. The include path to headers is already set by R. #PKG_LIBS = ## With R 3.1.0 or later, you can uncomment the following line to tell R to ## enable compilation with C++11 (or even C++14) where available #CXX_STD = CXX11 RcppEigen/inst/skeleton/rcppeigen_hello_world.Rd0000644000176200001440000000250412472772267021605 0ustar liggesusers\name{RcppEigen-Functions} \alias{rcppeigen_hello_world} \alias{rcppeigen_innerproduct} \alias{rcppeigen_outerproduct} \alias{rcppeigen_bothproducts} \title{Set of functions in example RcppEigen package} \description{ These four functions are created when \code{RcppEigen.package.skeleton()} is invoked to create a skeleton packages. } \usage{ rcppeigen_hello_world() rcppeigen_outerproduct(x) rcppeigen_innerproduct(x) rcppeigen_bothproducts(x) } \arguments{ \item{x}{a numeric vector} } \value{ \code{rcppeigen_hello_world()} does not return a value, but displays a message to the console. \code{rcppeigen_outerproduct()} returns a numeric matrix computed as the outer (vector) product of \code{x}. \code{rcppeigen_innerproduct()} returns a double computer as the inner (vector) product of \code{x}. \code{rcppeigen_bothproducts()} returns a list with both the outer and inner products. } \details{ These are example functions which should be largely self-explanatory. Their main benefit is to demonstrate how to write a function using the Eigen C++ classes, and to have to such a function accessible from R. } \references{ See the documentation for Eigen, and RcppEigen, for more details. } \examples{ x <- sqrt(1:4) rcppeigen_innerproduct(x) rcppeigen_outerproduct(x) } \author{Dirk Eddelbuettel} RcppEigen/inst/skeleton/Makevars.win0000644000176200001440000000050612472772267017240 0ustar liggesusers## With Rcpp 0.11.0 and later, we no longer need to set PKG_LIBS as there is ## no user-facing library. The include path to headers is already set by R. 
#PKG_LIBS = ## With R 3.1.0 or later, you can uncomment the following line to tell R to ## enable compilation with C++11 (or even C++14) where available #CXX_STD = CXX11 RcppEigen/inst/skeleton/rcppeigen_hello_world.cpp0000644000176200001440000000273112304401032021771 0ustar liggesusers// -*- mode: C++; c-indent-level: 4; c-basic-offset: 4; indent-tabs-mode: nil; -*- // we only include RcppEigen.h which pulls Rcpp.h in for us #include // via the depends attribute we tell Rcpp to create hooks for // RcppEigen so that the build process will know what to do // // [[Rcpp::depends(RcppEigen)]] // simple example of creating two matrices and // returning the result of an operatioon on them // // via the exports attribute we tell Rcpp to make this function // available from R // // [[Rcpp::export]] Eigen::MatrixXd rcppeigen_hello_world() { Eigen::MatrixXd m1 = Eigen::MatrixXd::Identity(3, 3); Eigen::MatrixXd m2 = Eigen::MatrixXd::Random(3, 3); return m1 + 3 * (m1 + m2); } // another simple example: outer product of a vector, // returning a matrix // // [[Rcpp::export]] Eigen::MatrixXd rcppeigen_outerproduct(const Eigen::VectorXd & x) { Eigen::MatrixXd m = x * x.transpose(); return m; } // and the inner product returns a scalar // // [[Rcpp::export]] double rcppeigen_innerproduct(const Eigen::VectorXd & x) { double v = x.transpose() * x; return v; } // and we can use Rcpp::List to return both at the same time // // [[Rcpp::export]] Rcpp::List rcppeigen_bothproducts(const Eigen::VectorXd & x) { Eigen::MatrixXd op = x * x.transpose(); double ip = x.transpose() * x; return Rcpp::List::create(Rcpp::Named("outer")=op, Rcpp::Named("inner")=ip); } RcppEigen/inst/include/0000755000176200001440000000000012550462273014534 5ustar liggesusersRcppEigen/inst/include/RcppEigenCholmod.h0000644000176200001440000014070212253717461020075 0ustar liggesusers// -*- mode: C++; c-indent-level: 4; c-basic-offset: 4; tab-width: 8 -*- // // cholmod.h: selected headers from Tim Davis's CHOLMOD package // // Copyright (C) 2011 Douglas Bates, Martin Maechler, Dirk Eddelbuettel and Romain Francois // // This file is part of RcppEigen. // // RcppEigen is free software: you can redistribute it and/or modify it // under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 2 of the License, or // (at your option) any later version. // // RcppEigen is distributed in the hope that it will be useful, but // WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with RcppEigen. If not, see . #ifndef RcppEigen_CHOLMOD_H #define RcppEigen_CHOLMOD_H #ifdef __cplusplus extern "C" { #endif // from Matrix/src/SuiteSparse_config/SuiteSparse_config.h - line 51 : #ifndef SuiteSparse_long #ifdef _WIN64 #define SuiteSparse_long __int64 #define SuiteSparse_long_max _I64_MAX #define SuiteSparse_long_idd "I64d" #else #define SuiteSparse_long long #define SuiteSparse_long_max LONG_MAX #define SuiteSparse_long_idd "ld" #endif #define SuiteSparse_long_id "%" SuiteSparse_long_idd #endif /* For backward compatibility with prior versions of SuiteSparse. The UF_* * macros are deprecated and will be removed in a future version. 
*/ #ifndef UF_long #define UF_long SuiteSparse_long #define UF_long_max SuiteSparse_long_max #define UF_long_idd SuiteSparse_long_idd #define UF_long_id SuiteSparse_long_id #endif #define CHOLMOD_HAS_VERSION_FUNCTION #define CHOLMOD_DATE "April 25, 2013" #define CHOLMOD_VER_CODE(main,sub) ((main) * 1000 + (sub)) #define CHOLMOD_MAIN_VERSION 2 #define CHOLMOD_SUB_VERSION 1 #define CHOLMOD_SUBSUB_VERSION 2 #define CHOLMOD_VERSION \ CHOLMOD_VER_CODE(CHOLMOD_MAIN_VERSION,CHOLMOD_SUB_VERSION) // from ../../src/CHOLMOD/Include/cholmod_core.h - line 275 : /* Each CHOLMOD object has its own type code. */ #define CHOLMOD_COMMON 0 #define CHOLMOD_SPARSE 1 #define CHOLMOD_FACTOR 2 #define CHOLMOD_DENSE 3 #define CHOLMOD_TRIPLET 4 /* ========================================================================== */ /* === CHOLMOD Common ======================================================= */ /* ========================================================================== */ /* itype defines the types of integer used: */ #define CHOLMOD_INT 0 /* all integer arrays are int */ #define CHOLMOD_INTLONG 1 /* most are int, some are SuiteSparse_long */ #define CHOLMOD_LONG 2 /* all integer arrays are SuiteSparse_long */ /* The itype of all parameters for all CHOLMOD routines must match. * FUTURE WORK: CHOLMOD_INTLONG is not yet supported. */ /* dtype defines what the numerical type is (double or float): */ #define CHOLMOD_DOUBLE 0 /* all numerical values are double */ #define CHOLMOD_SINGLE 1 /* all numerical values are float */ /* The dtype of all parameters for all CHOLMOD routines must match. * * Scalar floating-point values are always passed as double arrays of size 2 * (for the real and imaginary parts). They are typecast to float as needed. * FUTURE WORK: the float case is not supported yet. */ /* xtype defines the kind of numerical values used: */ #define CHOLMOD_PATTERN 0 /* pattern only, no numerical values */ #define CHOLMOD_REAL 1 /* a real matrix */ #define CHOLMOD_COMPLEX 2 /* a complex matrix (ANSI C99 compatible) */ #define CHOLMOD_ZOMPLEX 3 /* a complex matrix (MATLAB compatible) */ /* Definitions for cholmod_common: */ #define CHOLMOD_MAXMETHODS 9 /* maximum number of different methods that */ /* cholmod_analyze can try. Must be >= 9. */ /* Common->status values. zero means success, negative means a fatal error, * positive is a warning. */ #define CHOLMOD_OK 0 /* success */ #define CHOLMOD_NOT_INSTALLED (-1) /* failure: method not installed */ #define CHOLMOD_OUT_OF_MEMORY (-2) /* failure: out of memory */ #define CHOLMOD_TOO_LARGE (-3) /* failure: integer overflow occured */ #define CHOLMOD_INVALID (-4) /* failure: invalid input */ #define CHOLMOD_GPU_PROBLEM (-5) /* failure: GPU fatal error */ #define CHOLMOD_NOT_POSDEF (1) /* warning: matrix not pos. def. */ #define CHOLMOD_DSMALL (2) /* warning: D for LDL' or diag(L) or */ /* LL' has tiny absolute value */ /* ordering method (also used for L->ordering) */ #define CHOLMOD_NATURAL 0 /* use natural ordering */ #define CHOLMOD_GIVEN 1 /* use given permutation */ #define CHOLMOD_AMD 2 /* use minimum degree (AMD) */ #define CHOLMOD_METIS 3 /* use METIS' nested dissection */ #define CHOLMOD_NESDIS 4 /* use CHOLMOD's version of nested dissection:*/ /* node bisector applied recursively, followed * by constrained minimum degree (CSYMAMD or * CCOLAMD) */ #define CHOLMOD_COLAMD 5 /* use AMD for A, COLAMD for A*A' */ /* POSTORDERED is not a method, but a result of natural ordering followed by a * weighted postorder. 
It is used for L->ordering, not method [ ].ordering. */ #define CHOLMOD_POSTORDERED 6 /* natural ordering, postordered. */ /* supernodal strategy (for Common->supernodal) */ #define CHOLMOD_SIMPLICIAL 0 /* always do simplicial */ #define CHOLMOD_AUTO 1 /* select simpl/super depending on matrix */ #define CHOLMOD_SUPERNODAL 2 /* always do supernodal */ typedef struct cholmod_common_struct { /* ---------------------------------------------------------------------- */ /* parameters for symbolic/numeric factorization and update/downdate */ /* ---------------------------------------------------------------------- */ double dbound ; /* Smallest absolute value of diagonal entries of D * for LDL' factorization and update/downdate/rowadd/ * rowdel, or the diagonal of L for an LL' factorization. * Entries in the range 0 to dbound are replaced with dbound. * Entries in the range -dbound to 0 are replaced with -dbound. No * changes are made to the diagonal if dbound <= 0. Default: zero */ double grow0 ; /* For a simplicial factorization, L->i and L->x can * grow if necessary. grow0 is the factor by which * it grows. For the initial space, L is of size MAX (1,grow0) times * the required space. If L runs out of space, the new size of L is * MAX(1.2,grow0) times the new required space. If you do not plan on * modifying the LDL' factorization in the Modify module, set grow0 to * zero (or set grow2 to 0, see below). Default: 1.2 */ double grow1 ; size_t grow2 ; /* For a simplicial factorization, each column j of L * is initialized with space equal to * grow1*L->ColCount[j] + grow2. If grow0 < 1, grow1 < 1, or grow2 == 0, * then the space allocated is exactly equal to L->ColCount[j]. If the * column j runs out of space, it increases to grow1*need + grow2 in * size, where need is the total # of nonzeros in that column. If you do * not plan on modifying the factorization in the Modify module, set * grow2 to zero. Default: grow1 = 1.2, grow2 = 5. */ size_t maxrank ; /* rank of maximum update/downdate. Valid values: * 2, 4, or 8. A value < 2 is set to 2, and a * value > 8 is set to 8. It is then rounded up to the next highest * power of 2, if not already a power of 2. Workspace (Xwork, below) of * size nrow-by-maxrank double's is allocated for the update/downdate. * If an update/downdate of rank-k is requested, with k > maxrank, * it is done in steps of maxrank. Default: 8, which is fastest. * Memory usage can be reduced by setting maxrank to 2 or 4. */ double supernodal_switch ; /* supernodal vs simplicial factorization */ int supernodal ; /* If Common->supernodal <= CHOLMOD_SIMPLICIAL * (0) then cholmod_analyze performs a * simplicial analysis. If >= CHOLMOD_SUPERNODAL (2), then a supernodal * analysis is performed. If == CHOLMOD_AUTO (1) and * flop/nnz(L) < Common->supernodal_switch, then a simplicial analysis * is done. A supernodal analysis done otherwise. * Default: CHOLMOD_AUTO. Default supernodal_switch = 40 */ int final_asis ; /* If TRUE, then ignore the other final_* parameters * (except for final_pack). * The factor is left as-is when done. Default: TRUE.*/ int final_super ; /* If TRUE, leave a factor in supernodal form when * supernodal factorization is finished. If FALSE, * then convert to a simplicial factor when done. * Default: TRUE */ int final_ll ; /* If TRUE, leave factor in LL' form when done. * Otherwise, leave in LDL' form. Default: FALSE */ int final_pack ; /* If TRUE, pack the columns when done. 
If TRUE, and * cholmod_factorize is called with a symbolic L, L is * allocated with exactly the space required, using L->ColCount. If you * plan on modifying the factorization, set Common->final_pack to FALSE, * and each column will be given a little extra slack space for future * growth in fill-in due to updates. Default: TRUE */ int final_monotonic ; /* If TRUE, ensure columns are monotonic when done. * Default: TRUE */ int final_resymbol ;/* if cholmod_factorize performed a supernodal * factorization, final_resymbol is true, and * final_super is FALSE (convert a simplicial numeric factorization), * then numerically zero entries that resulted from relaxed supernodal * amalgamation are removed. This does not remove entries that are zero * due to exact numeric cancellation, since doing so would break the * update/downdate rowadd/rowdel routines. Default: FALSE. */ /* supernodal relaxed amalgamation parameters: */ double zrelax [3] ; size_t nrelax [3] ; /* Let ns be the total number of columns in two adjacent supernodes. * Let z be the fraction of zero entries in the two supernodes if they * are merged (z includes zero entries from prior amalgamations). The * two supernodes are merged if: * (ns <= nrelax [0]) || (no new zero entries added) || * (ns <= nrelax [1] && z < zrelax [0]) || * (ns <= nrelax [2] && z < zrelax [1]) || (z < zrelax [2]) * * Default parameters result in the following rule: * (ns <= 4) || (no new zero entries added) || * (ns <= 16 && z < 0.8) || (ns <= 48 && z < 0.1) || (z < 0.05) */ int prefer_zomplex ; /* X = cholmod_solve (sys, L, B, Common) computes * x=A\b or solves a related system. If L and B are * both real, then X is real. Otherwise, X is returned as * CHOLMOD_COMPLEX if Common->prefer_zomplex is FALSE, or * CHOLMOD_ZOMPLEX if Common->prefer_zomplex is TRUE. This parameter * is needed because there is no supernodal zomplex L. Suppose the * caller wants all complex matrices to be stored in zomplex form * (MATLAB, for example). A supernodal L is returned in complex form * if A is zomplex. B can be real, and thus X = cholmod_solve (L,B) * should return X as zomplex. This cannot be inferred from the input * arguments L and B. Default: FALSE, since all data types are * supported in CHOLMOD_COMPLEX form and since this is the native type * of LAPACK and the BLAS. Note that the MATLAB/cholmod.c mexFunction * sets this parameter to TRUE, since MATLAB matrices are in * CHOLMOD_ZOMPLEX form. */ int prefer_upper ; /* cholmod_analyze and cholmod_factorize work * fastest when a symmetric matrix is stored in * upper triangular form when a fill-reducing ordering is used. In * MATLAB, this corresponds to how x=A\b works. When the matrix is * ordered as-is, they work fastest when a symmetric matrix is in lower * triangular form. In MATLAB, R=chol(A) does the opposite. This * parameter affects only how cholmod_read returns a symmetric matrix. * If TRUE (the default case), a symmetric matrix is always returned in * upper-triangular form (A->stype = 1). */ int quick_return_if_not_posdef ; /* if TRUE, the supernodal numeric * factorization will return quickly if * the matrix is not positive definite. Default: FALSE. */ /* ---------------------------------------------------------------------- */ /* printing and error handling options */ /* ---------------------------------------------------------------------- */ int print ; /* print level. Default: 3 */ int precise ; /* if TRUE, print 16 digits. Otherwise print 5 */ int (*print_function) (const char *, ...) 
; /* pointer to printf */ int try_catch ; /* if TRUE, then ignore errors; CHOLMOD is in the middle * of a try/catch block. No error message is printed * and the Common->error_handler function is not called. */ void (*error_handler) (int status, const char *file, int line, const char *message) ; /* Common->error_handler is the user's error handling routine. If not * NULL, this routine is called if an error occurs in CHOLMOD. status * can be CHOLMOD_OK (0), negative for a fatal error, and positive for * a warning. file is a string containing the name of the source code * file where the error occured, and line is the line number in that * file. message is a string describing the error in more detail. */ /* ---------------------------------------------------------------------- */ /* ordering options */ /* ---------------------------------------------------------------------- */ /* The cholmod_analyze routine can try many different orderings and select * the best one. It can also try one ordering method multiple times, with * different parameter settings. The default is to use three orderings, * the user's permutation (if provided), AMD which is the fastest ordering * and generally gives good fill-in, and METIS. CHOLMOD's nested dissection * (METIS with a constrained AMD) usually gives a better ordering than METIS * alone (by about 5% to 10%) but it takes more time. * * If you know the method that is best for your matrix, set Common->nmethods * to 1 and set Common->method [0] to the set of parameters for that method. * If you set it to 1 and do not provide a permutation, then only AMD will * be called. * * If METIS is not available, the default # of methods tried is 2 (the user * permutation, if any, and AMD). * * To try other methods, set Common->nmethods to the number of methods you * want to try. The suite of default methods and their parameters is * described in the cholmod_defaults routine, and summarized here: * * Common->method [i]: * i = 0: user-provided ordering (cholmod_analyze_p only) * i = 1: AMD (for both A and A*A') * i = 2: METIS * i = 3: CHOLMOD's nested dissection (NESDIS), default parameters * i = 4: natural * i = 5: NESDIS with nd_small = 20000 * i = 6: NESDIS with nd_small = 4, no constrained minimum degree * i = 7: NESDIS with no dense node removal * i = 8: AMD for A, COLAMD for A*A' * * You can modify the suite of methods you wish to try by modifying * Common.method [...] after calling cholmod_start or cholmod_defaults. * * For example, to use AMD, followed by a weighted postordering: * * Common->nmethods = 1 ; * Common->method [0].ordering = CHOLMOD_AMD ; * Common->postorder = TRUE ; * * To use the natural ordering (with no postordering): * * Common->nmethods = 1 ; * Common->method [0].ordering = CHOLMOD_NATURAL ; * Common->postorder = FALSE ; * * If you are going to factorize hundreds or more matrices with the same * nonzero pattern, you may wish to spend a great deal of time finding a * good permutation. In this case, try setting Common->nmethods to 9. * The time spent in cholmod_analysis will be very high, but you need to * call it only once. * * cholmod_analyze sets Common->current to a value between 0 and nmethods-1. * Each ordering method uses the set of options defined by this parameter. */ int nmethods ; /* The number of ordering methods to try. Default: 0. * nmethods = 0 is a special case. cholmod_analyze * will try the user-provided ordering (if given) and AMD. Let fl and * lnz be the flop count and nonzeros in L from AMD's ordering. 
Let * anz be the number of nonzeros in the upper or lower triangular part * of the symmetric matrix A. If fl/lnz < 500 or lnz/anz < 5, then this * is a good ordering, and METIS is not attempted. Otherwise, METIS is * tried. The best ordering found is used. If nmethods > 0, the * methods used are given in the method[ ] array, below. The first * three methods in the default suite of orderings is (1) use the given * permutation (if provided), (2) use AMD, and (3) use METIS. Maximum * allowed value is CHOLMOD_MAXMETHODS. */ int current ; /* The current method being tried. Default: 0. Valid * range is 0 to nmethods-1. */ int selected ; /* The best method found. */ /* The suite of ordering methods and parameters: */ struct cholmod_method_struct { /* statistics for this method */ double lnz ; /* nnz(L) excl. zeros from supernodal amalgamation, * for a "pure" L */ double fl ; /* flop count for a "pure", real simplicial LL' * factorization, with no extra work due to * amalgamation. Subtract n to get the LDL' flop count. Multiply * by about 4 if the matrix is complex or zomplex. */ /* ordering method parameters */ double prune_dense ;/* dense row/col control for AMD, SYMAMD, CSYMAMD, * and NESDIS (cholmod_nested_dissection). For a * symmetric n-by-n matrix, rows/columns with more than * MAX (16, prune_dense * sqrt (n)) entries are removed prior to * ordering. They appear at the end of the re-ordered matrix. * * If prune_dense < 0, only completely dense rows/cols are removed. * * This paramater is also the dense column control for COLAMD and * CCOLAMD. For an m-by-n matrix, columns with more than * MAX (16, prune_dense * sqrt (MIN (m,n))) entries are removed prior * to ordering. They appear at the end of the re-ordered matrix. * CHOLMOD factorizes A*A', so it calls COLAMD and CCOLAMD with A', * not A. Thus, this parameter affects the dense *row* control for * CHOLMOD's matrix, and the dense *column* control for COLAMD and * CCOLAMD. * * Removing dense rows and columns improves the run-time of the * ordering methods. It has some impact on ordering quality * (usually minimal, sometimes good, sometimes bad). * * Default: 10. */ double prune_dense2 ;/* dense row control for COLAMD and CCOLAMD. * Rows with more than MAX (16, dense2 * sqrt (n)) * for an m-by-n matrix are removed prior to ordering. CHOLMOD's * matrix is transposed before ordering it with COLAMD or CCOLAMD, * so this controls the dense *columns* of CHOLMOD's matrix, and * the dense *rows* of COLAMD's or CCOLAMD's matrix. * * If prune_dense2 < 0, only completely dense rows/cols are removed. * * Default: -1. Note that this is not the default for COLAMD and * CCOLAMD. -1 is best for Cholesky. 10 is best for LU. */ double nd_oksep ; /* in NESDIS, when a node separator is computed, it * discarded if nsep >= nd_oksep*n, where nsep is * the number of nodes in the separator, and n is the size of the * graph being cut. Valid range is 0 to 1. If 1 or greater, the * separator is discarded if it consists of the entire graph. * Default: 1 */ double other1 [4] ; /* future expansion */ size_t nd_small ; /* do not partition graphs with fewer nodes than * nd_small, in NESDIS. Default: 200 (same as * METIS) */ size_t other2 [4] ; /* future expansion */ int aggressive ; /* Aggresive absorption in AMD, COLAMD, SYMAMD, * CCOLAMD, and CSYMAMD. Default: TRUE */ int order_for_lu ; /* CCOLAMD can be optimized to produce an ordering * for LU or Cholesky factorization. CHOLMOD only * performs a Cholesky factorization. 
However, you may wish to use * CHOLMOD as an interface for CCOLAMD but use it for your own LU * factorization. In this case, order_for_lu should be set to FALSE. * When factorizing in CHOLMOD itself, you should *** NEVER *** set * this parameter FALSE. Default: TRUE. */ int nd_compress ; /* If TRUE, compress the graph and subgraphs before * partitioning them in NESDIS. Default: TRUE */ int nd_camd ; /* If 1, follow the nested dissection ordering * with a constrained minimum degree ordering that * respects the partitioning just found (using CAMD). If 2, use * CSYMAMD instead. If you set nd_small very small, you may not need * this ordering, and can save time by setting it to zero (no * constrained minimum degree ordering). Default: 1. */ int nd_components ; /* The nested dissection ordering finds a node * separator that splits the graph into two parts, * which may be unconnected. If nd_components is TRUE, each of * these connected components is split independently. If FALSE, * each part is split as a whole, even if it consists of more than * one connected component. Default: FALSE */ /* fill-reducing ordering to use */ int ordering ; size_t other3 [4] ; /* future expansion */ } method [CHOLMOD_MAXMETHODS + 1] ; int postorder ; /* If TRUE, cholmod_analyze follows the ordering with a * weighted postorder of the elimination tree. Improves * supernode amalgamation. Does not affect fundamental nnz(L) and * flop count. Default: TRUE. */ /* ---------------------------------------------------------------------- */ /* memory management routines */ /* ---------------------------------------------------------------------- */ void *(*malloc_memory) (size_t) ; /* pointer to malloc */ void *(*realloc_memory) (void *, size_t) ; /* pointer to realloc */ void (*free_memory) (void *) ; /* pointer to free */ void *(*calloc_memory) (size_t, size_t) ; /* pointer to calloc */ /* ---------------------------------------------------------------------- */ /* routines for complex arithmetic */ /* ---------------------------------------------------------------------- */ int (*complex_divide) (double ax, double az, double bx, double bz, double *cx, double *cz) ; /* flag = complex_divide (ax, az, bx, bz, &cx, &cz) computes the complex * division c = a/b, where ax and az hold the real and imaginary part * of a, and b and c are stored similarly. flag is returned as 1 if * a divide-by-zero occurs, or 0 otherwise. By default, the function * pointer Common->complex_divide is set equal to cholmod_divcomplex. */ double (*hypotenuse) (double x, double y) ; /* s = hypotenuse (x,y) computes s = sqrt (x*x + y*y), but does so more * accurately. By default, the function pointer Common->hypotenuse is * set equal to cholmod_hypot. See also the hypot function in the C99 * standard, which has an identical syntax and function. If you have * a C99-compliant compiler, you can set Common->hypotenuse = hypot. */ /* ---------------------------------------------------------------------- */ /* METIS workarounds */ /* ---------------------------------------------------------------------- */ double metis_memory ; /* This is a parameter for CHOLMOD's interface to * METIS, not a parameter to METIS itself. METIS * uses an amount of memory that is difficult to estimate precisely * beforehand. If it runs out of memory, it terminates your program. * All routines in CHOLMOD except for CHOLMOD's interface to METIS * return an error status and safely return to your program if they run * out of memory. 
To mitigate this problem, the CHOLMOD interface * can allocate a single block of memory equal in size to an empirical * upper bound of METIS's memory usage times the Common->metis_memory * parameter, and then immediately free it. It then calls METIS. If * this pre-allocation fails, it is possible that METIS will fail as * well, and so CHOLMOD returns with an out-of-memory condition without * calling METIS. * * METIS_NodeND (used in the CHOLMOD_METIS ordering option) with its * default parameter settings typically uses about (4*nz+40n+4096) * times sizeof(int) memory, where nz is equal to the number of entries * in A for the symmetric case or AA' if an unsymmetric matrix is * being ordered (where nz includes both the upper and lower parts * of A or AA'). The observed "upper bound" (with 2 exceptions), * measured in an instrumented copy of METIS 4.0.1 on thousands of * matrices, is (10*nz+50*n+4096) * sizeof(int). Two large matrices * exceeded this bound, one by almost a factor of 2 (Gupta/gupta2). * * If your program is terminated by METIS, try setting metis_memory to * 2.0, or even higher if needed. By default, CHOLMOD assumes that METIS * does not have this problem (so that CHOLMOD will work correctly when * this issue is fixed in METIS). Thus, the default value is zero. * This work-around is not guaranteed anyway. * * If a matrix exceeds this predicted memory usage, AMD is attempted * instead. It, too, may run out of memory, but if it does so it will * not terminate your program. */ double metis_dswitch ; /* METIS_NodeND in METIS 4.0.1 gives a seg */ size_t metis_nswitch ; /* fault with one matrix of order n = 3005 and * nz = 6,036,025. This is a very dense graph. * The workaround is to use AMD instead of METIS for matrices of dimension * greater than Common->metis_nswitch (default 3000) or more and with * density of Common->metis_dswitch (default 0.66) or more. * cholmod_nested_dissection has no problems with the same matrix, even * though it uses METIS_NodeComputeSeparator on this matrix. If this * seg fault does not affect you, set metis_nswitch to zero or less, * and CHOLMOD will not switch to AMD based just on the density of the * matrix (it will still switch to AMD if the metis_memory parameter * causes the switch). */ /* ---------------------------------------------------------------------- */ /* workspace */ /* ---------------------------------------------------------------------- */ /* CHOLMOD has several routines that take less time than the size of * workspace they require. Allocating and initializing the workspace would * dominate the run time, unless workspace is allocated and initialized * just once. CHOLMOD allocates this space when needed, and holds it here * between calls to CHOLMOD. cholmod_start sets these pointers to NULL * (which is why it must be the first routine called in CHOLMOD). * cholmod_finish frees the workspace (which is why it must be the last * call to CHOLMOD). */ size_t nrow ; /* size of Flag and Head */ UF_long mark ; /* mark value for Flag array */ size_t iworksize ; /* size of Iwork. Upper bound: 6*nrow+ncol */ size_t xworksize ; /* size of Xwork, in bytes. * maxrank*nrow*sizeof(double) for update/downdate. * 2*nrow*sizeof(double) otherwise */ /* initialized workspace: contents needed between calls to CHOLMOD */ void *Flag ; /* size nrow, an integer array. Kept cleared between * calls to cholmod rouines (Flag [i] < mark) */ void *Head ; /* size nrow+1, an integer array. 
Kept cleared between * calls to cholmod routines (Head [i] = EMPTY) */ void *Xwork ; /* a double array. Its size varies. It is nrow for * most routines (cholmod_rowfac, cholmod_add, * cholmod_aat, cholmod_norm, cholmod_ssmult) for the real case, twice * that when the input matrices are complex or zomplex. It is of size * 2*nrow for cholmod_rowadd and cholmod_rowdel. For cholmod_updown, * its size is maxrank*nrow where maxrank is 2, 4, or 8. Kept cleared * between calls to cholmod (set to zero). */ /* uninitialized workspace, contents not needed between calls to CHOLMOD */ void *Iwork ; /* size iworksize, 2*nrow+ncol for most routines, * up to 6*nrow+ncol for cholmod_analyze. */ int itype ; /* If CHOLMOD_LONG, Flag, Head, and Iwork are UF_long. * Otherwise all three arrays are int. */ int dtype ; /* double or float */ /* Common->itype and Common->dtype are used to define the types of all * sparse matrices, triplet matrices, dense matrices, and factors * created using this Common struct. The itypes and dtypes of all * parameters to all CHOLMOD routines must match. */ int no_workspace_reallocate ; /* this is an internal flag, used as a * precaution by cholmod_analyze. It is normally false. If true, * cholmod_allocate_work is not allowed to reallocate any workspace; * they must use the existing workspace in Common (Iwork, Flag, Head, * and Xwork). Added for CHOLMOD v1.1 */ /* ---------------------------------------------------------------------- */ /* statistics */ /* ---------------------------------------------------------------------- */ /* fl and lnz are set only in cholmod_analyze and cholmod_rowcolcounts, * in the Cholesky modudle. modfl is set only in the Modify module. */ int status ; /* error code */ double fl ; /* LL' flop count from most recent analysis */ double lnz ; /* fundamental nz in L */ double anz ; /* nonzeros in tril(A) if A is symmetric/lower, * triu(A) if symmetric/upper, or tril(A*A') if * unsymmetric, in last call to cholmod_analyze. */ double modfl ; /* flop count from most recent update/downdate/ * rowadd/rowdel (excluding flops to modify the * solution to Lx=b, if computed) */ size_t malloc_count ; /* # of objects malloc'ed minus the # free'd*/ size_t memory_usage ; /* peak memory usage in bytes */ size_t memory_inuse ; /* current memory usage in bytes */ double nrealloc_col ; /* # of column reallocations */ double nrealloc_factor ;/* # of factor reallocations due to col. reallocs */ double ndbounds_hit ; /* # of times diagonal modified by dbound */ double rowfacfl ; /* # of flops in last call to cholmod_rowfac */ double aatfl ; /* # of flops to compute A(:,f)*A(:,f)' */ /* ---------------------------------------------------------------------- */ /* future expansion */ /* ---------------------------------------------------------------------- */ /* To allow CHOLMOD to be updated without recompiling the user application, * additional space is set aside here for future statistics, parameters, * and workspace. Note: additional entries were added in v1.1 to the * method array, above, and thus v1.0 and v1.1 are not binary compatible. * * v1.1 to the current version are binary compatible. 
*/ /* ---------------------------------------------------------------------- */ double other1 [10] ; double SPQR_xstat [4] ; /* for SuiteSparseQR statistics */ /* SuiteSparseQR control parameters: */ double SPQR_grain ; /* task size is >= max (total flops / grain) */ double SPQR_small ; /* task size is >= small */ /* ---------------------------------------------------------------------- */ UF_long SPQR_istat [10] ; /* for SuiteSparseQR statistics */ UF_long other2 [6] ; /* reduced from size 16 in v1.6 */ /* ---------------------------------------------------------------------- */ int other3 [10] ; /* reduced from size 16 in v1.1. */ int prefer_binary ; /* cholmod_read_triplet converts a symmetric * pattern-only matrix into a real matrix. If * prefer_binary is FALSE, the diagonal entries are set to 1 + the degree * of the row/column, and off-diagonal entries are set to -1 (resulting * in a positive definite matrix if the diagonal is zero-free). Most * symmetric patterns are the pattern a positive definite matrix. If * this parameter is TRUE, then the matrix is returned with a 1 in each * entry, instead. Default: FALSE. Added in v1.3. */ /* control parameter (added for v1.2): */ int default_nesdis ; /* Default: FALSE. If FALSE, then the default * ordering strategy (when Common->nmethods == 0) * is to try the given ordering (if present), AMD, and then METIS if AMD * reports high fill-in. If Common->default_nesdis is TRUE then NESDIS * is used instead in the default strategy. */ /* statistic (added for v1.2): */ int called_nd ; /* TRUE if the last call to * cholmod_analyze called NESDIS or METIS. */ int blas_ok ; /* FALSE if BLAS int overflow; TRUE otherwise */ /* SuiteSparseQR control parameters: */ int SPQR_shrink ; /* controls stack realloc method */ int SPQR_nthreads ; /* number of TBB threads, 0 = auto */ /* ---------------------------------------------------------------------- */ size_t other4 [16] ; /* ---------------------------------------------------------------------- */ void *other5 [16] ; } cholmod_common ; // in ../../src/CHOLMOD/Include/cholmod_core.h skip forward to - line 1114 : /* A sparse matrix stored in compressed-column form. */ typedef struct cholmod_sparse_struct { size_t nrow ; /* the matrix is nrow-by-ncol */ size_t ncol ; size_t nzmax ; /* maximum number of entries in the matrix */ /* pointers to int or UF_long: */ void *p ; /* p [0..ncol], the column pointers */ void *i ; /* i [0..nzmax-1], the row indices */ /* for unpacked matrices only: */ void *nz ; /* nz [0..ncol-1], the # of nonzeros in each col. In * packed form, the nonzero pattern of column j is in * A->i [A->p [j] ... A->p [j+1]-1]. In unpacked form, column j is in * A->i [A->p [j] ... A->p [j]+A->nz[j]-1] instead. In both cases, the * numerical values (if present) are in the corresponding locations in * the array x (or z if A->xtype is CHOLMOD_ZOMPLEX). */ /* pointers to double or float: */ void *x ; /* size nzmax or 2*nzmax, if present */ void *z ; /* size nzmax, if present */ int stype ; /* Describes what parts of the matrix are considered: * * 0: matrix is "unsymmetric": use both upper and lower triangular parts * (the matrix may actually be symmetric in pattern and value, but * both parts are explicitly stored and used). May be square or * rectangular. * >0: matrix is square and symmetric, use upper triangular part. * Entries in the lower triangular part are ignored. * <0: matrix is square and symmetric, use lower triangular part. * Entries in the upper triangular part are ignored. 
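 *
 * A minimal illustrative sketch (hypothetical variable names; the call
 * signature is taken from the cholmod_allocate_sparse declaration that is
 * re-exported further below in this header): a symmetric matrix kept only
 * in its upper triangle would be allocated with stype = 1, e.g.
 *
 *   CHM_SP A = cholmod_allocate_sparse(n, n, nzmax,
 *                                      1, 1,   // sorted, packed
 *                                      1,      // stype > 0: use upper part
 *                                      CHOLMOD_REAL, Common);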
* * Note that stype>0 and stype<0 are different for cholmod_sparse and * cholmod_triplet. See the cholmod_triplet data structure for more * details. */ int itype ; /* CHOLMOD_INT: p, i, and nz are int. * CHOLMOD_INTLONG: p is UF_long, i and nz are int. * CHOLMOD_LONG: p, i, and nz are UF_long. */ int xtype ; /* pattern, real, complex, or zomplex */ int dtype ; /* x and z are double or float */ int sorted ; /* TRUE if columns are sorted, FALSE otherwise */ int packed ; /* TRUE if packed (nz ignored), FALSE if unpacked * (nz is required) */ } cholmod_sparse ; // in ../../src/CHOLMOD/Include/cholmod_core.h skip forward to - line 1554 : /* A symbolic and numeric factorization, either simplicial or supernodal. * In all cases, the row indices in the columns of L are kept sorted. */ typedef struct cholmod_factor_struct { /* ---------------------------------------------------------------------- */ /* for both simplicial and supernodal factorizations */ /* ---------------------------------------------------------------------- */ size_t n ; /* L is n-by-n */ size_t minor ; /* If the factorization failed, L->minor is the column * at which it failed (in the range 0 to n-1). A value * of n means the factorization was successful or * the matrix has not yet been factorized. */ /* ---------------------------------------------------------------------- */ /* symbolic ordering and analysis */ /* ---------------------------------------------------------------------- */ void *Perm ; /* size n, permutation used */ void *ColCount ; /* size n, column counts for simplicial L */ void *IPerm ; /* size n, inverse permutation. Only created by * cholmod_solve2 if Bset is used. */ /* ---------------------------------------------------------------------- */ /* simplicial factorization */ /* ---------------------------------------------------------------------- */ size_t nzmax ; /* size of i and x */ void *p ; /* p [0..ncol], the column pointers */ void *i ; /* i [0..nzmax-1], the row indices */ void *x ; /* x [0..nzmax-1], the numerical values */ void *z ; void *nz ; /* nz [0..ncol-1], the # of nonzeros in each column. * i [p [j] ... p [j]+nz[j]-1] contains the row indices, * and the numerical values are in the same locatins * in x. The value of i [p [k]] is always k. */ void *next ; /* size ncol+2. next [j] is the next column in i/x */ void *prev ; /* size ncol+2. prev [j] is the prior column in i/x. * head of the list is ncol+1, and the tail is ncol. */ /* ---------------------------------------------------------------------- */ /* supernodal factorization */ /* ---------------------------------------------------------------------- */ /* Note that L->x is shared with the simplicial data structure. L->x has * size L->nzmax for a simplicial factor, and size L->xsize for a supernodal * factor. */ size_t nsuper ; /* number of supernodes */ size_t ssize ; /* size of s, integer part of supernodes */ size_t xsize ; /* size of x, real part of supernodes */ size_t maxcsize ; /* size of largest update matrix */ size_t maxesize ; /* max # of rows in supernodes, excl. 
triangular part */ void *super ; /* size nsuper+1, first col in each supernode */ void *pi ; /* size nsuper+1, pointers to integer patterns */ void *px ; /* size nsuper+1, pointers to real parts */ void *s ; /* size ssize, integer part of supernodes */ /* ---------------------------------------------------------------------- */ /* factorization type */ /* ---------------------------------------------------------------------- */ int ordering ; /* ordering method used */ int is_ll ; /* TRUE if LL', FALSE if LDL' */ int is_super ; /* TRUE if supernodal, FALSE if simplicial */ int is_monotonic ; /* TRUE if columns of L appear in order 0..n-1. * Only applicable to simplicial numeric types. */ /* There are 8 types of factor objects that cholmod_factor can represent * (only 6 are used): * * Numeric types (xtype is not CHOLMOD_PATTERN) * -------------------------------------------- * * simplicial LDL': (is_ll FALSE, is_super FALSE). Stored in compressed * column form, using the simplicial components above (nzmax, p, i, * x, z, nz, next, and prev). The unit diagonal of L is not stored, * and D is stored in its place. There are no supernodes. * * simplicial LL': (is_ll TRUE, is_super FALSE). Uses the same storage * scheme as the simplicial LDL', except that D does not appear. * The first entry of each column of L is the diagonal entry of * that column of L. * * supernodal LDL': (is_ll FALSE, is_super TRUE). Not used. * FUTURE WORK: add support for supernodal LDL' * * supernodal LL': (is_ll TRUE, is_super TRUE). A supernodal factor, * using the supernodal components described above (nsuper, ssize, * xsize, maxcsize, maxesize, super, pi, px, s, x, and z). * * * Symbolic types (xtype is CHOLMOD_PATTERN) * ----------------------------------------- * * simplicial LDL': (is_ll FALSE, is_super FALSE). Nothing is present * except Perm and ColCount. * * simplicial LL': (is_ll TRUE, is_super FALSE). Identical to the * simplicial LDL', except for the is_ll flag. * * supernodal LDL': (is_ll FALSE, is_super TRUE). Not used. * FUTURE WORK: add support for supernodal LDL' * * supernodal LL': (is_ll TRUE, is_super TRUE). A supernodal symbolic * factorization. The simplicial symbolic information is present * (Perm and ColCount), as is all of the supernodal factorization * except for the numerical values (x and z). */ int itype ; /* The integer arrays are Perm, ColCount, p, i, nz, * next, prev, super, pi, px, and s. If itype is * CHOLMOD_INT, all of these are int arrays. * CHOLMOD_INTLONG: p, pi, px are SuiteSparse_long, others int. * CHOLMOD_LONG: all integer arrays are SuiteSparse_long. */ int xtype ; /* pattern, real, complex, or zomplex */ int dtype ; /* x and z double or float */ } cholmod_factor ; // in ../../src/CHOLMOD/Include/cholmod_core.h skip forward to - line 1836 : /* A dense matrix in column-oriented form. It has no itype since it contains * no integers. Entry in row i and column j is located in x [i+j*d]. */ typedef struct cholmod_dense_struct { size_t nrow ; /* the matrix is nrow-by-ncol */ size_t ncol ; size_t nzmax ; /* maximum number of entries in the matrix */ size_t d ; /* leading dimension (d >= nrow must hold) */ void *x ; /* size nzmax or 2*nzmax, if present */ void *z ; /* size nzmax, if present */ int xtype ; /* pattern, real, complex, or zomplex */ int dtype ; /* x and z double or float */ } cholmod_dense ; // in ../../src/CHOLMOD/Include/cholmod_core.h skip forward to - line 2033 : /* A sparse matrix stored in triplet form. 
*/ typedef struct cholmod_triplet_struct { size_t nrow ; /* the matrix is nrow-by-ncol */ size_t ncol ; size_t nzmax ; /* maximum number of entries in the matrix */ size_t nnz ; /* number of nonzeros in the matrix */ void *i ; /* i [0..nzmax-1], the row indices */ void *j ; /* j [0..nzmax-1], the column indices */ void *x ; /* size nzmax or 2*nzmax, if present */ void *z ; /* size nzmax, if present */ int stype ; /* Describes what parts of the matrix are considered: * * 0: matrix is "unsymmetric": use both upper and lower triangular parts * (the matrix may actually be symmetric in pattern and value, but * both parts are explicitly stored and used). May be square or * rectangular. * >0: matrix is square and symmetric. Entries in the lower triangular * part are transposed and added to the upper triangular part when * the matrix is converted to cholmod_sparse form. * <0: matrix is square and symmetric. Entries in the upper triangular * part are transposed and added to the lower triangular part when * the matrix is converted to cholmod_sparse form. * * Note that stype>0 and stype<0 are different for cholmod_sparse and * cholmod_triplet. The reason is simple. You can permute a symmetric * triplet matrix by simply replacing a row and column index with their * new row and column indices, via an inverse permutation. Suppose * P = L->Perm is your permutation, and Pinv is an array of size n. * Suppose a symmetric matrix A is represent by a triplet matrix T, with * entries only in the upper triangular part. Then the following code: * * Ti = T->i ; * Tj = T->j ; * for (k = 0 ; k < n ; k++) Pinv [P [k]] = k ; * for (k = 0 ; k < nz ; k++) Ti [k] = Pinv [Ti [k]] ; * for (k = 0 ; k < nz ; k++) Tj [k] = Pinv [Tj [k]] ; * * creates the triplet form of C=P*A*P'. However, if T initially * contains just the upper triangular entries (T->stype = 1), after * permutation it has entries in both the upper and lower triangular * parts. These entries should be transposed when constructing the * cholmod_sparse form of A, which is what cholmod_triplet_to_sparse * does. Thus: * * C = cholmod_triplet_to_sparse (T, 0, &Common) ; * * will return the matrix C = P*A*P'. * * Since the triplet matrix T is so simple to generate, it's quite easy * to remove entries that you do not want, prior to converting T to the * cholmod_sparse form. So if you include these entries in T, CHOLMOD * assumes that there must be a reason (such as the one above). Thus, * no entry in a triplet matrix is ever ignored. */ int itype ; /* CHOLMOD_LONG: i and j are SuiteSparse_long. 
Otherwise int */ int xtype ; /* pattern, real, complex, or zomplex */ int dtype ; /* x and z are double or float */ } cholmod_triplet ; // -------- our (Matrix) short and const_ forms of of the pointers : typedef cholmod_common* CHM_CM; typedef cholmod_dense* CHM_DN; typedef const cholmod_dense* const_CHM_DN; typedef cholmod_factor* CHM_FR; typedef const cholmod_factor* const_CHM_FR; typedef cholmod_sparse* CHM_SP; typedef const cholmod_sparse* const_CHM_SP; typedef cholmod_triplet* CHM_TR; typedef const cholmod_triplet* const_CHM_TR; // --------- Matrix ("M_") R ("R_") pkg routines "re-exported": --------------- int cholmod_start(CHM_CM); void R_cholmod_error(int status, const char *file, int line, const char *message); int cholmod_finish(CHM_CM); CHM_SP cholmod_allocate_sparse(size_t nrow, size_t ncol, size_t nzmax, int sorted, int packed, int stype, int xtype, CHM_CM); int cholmod_free_factor(CHM_FR *L, CHM_CM); int cholmod_free_dense(CHM_DN *A, CHM_CM); int cholmod_free_sparse(CHM_SP *A, CHM_CM); int cholmod_free_triplet(CHM_TR *T, CHM_CM); long int cholmod_nnz(const_CHM_SP, CHM_CM); CHM_SP cholmod_speye(size_t nrow, size_t ncol, int xtype, CHM_CM); CHM_SP cholmod_transpose(const_CHM_SP, int values, CHM_CM); int cholmod_sort(CHM_SP A, CHM_CM); CHM_SP cholmod_vertcat(const_CHM_SP, const_CHM_SP, int values, CHM_CM); CHM_SP cholmod_copy(const_CHM_SP, int stype, int mode, CHM_CM); CHM_SP cholmod_add(const_CHM_SP, const_CHM_SP, double alpha [2], double beta [2], int values, int sorted, CHM_CM); // from ../../src/CHOLMOD/Include/cholmod_cholesky.h - line 178 : #define CHOLMOD_A 0 /* solve Ax=b */ #define CHOLMOD_LDLt 1 /* solve LDL'x=b */ #define CHOLMOD_LD 2 /* solve LDx=b */ #define CHOLMOD_DLt 3 /* solve DL'x=b */ #define CHOLMOD_L 4 /* solve Lx=b */ #define CHOLMOD_Lt 5 /* solve L'x=b */ #define CHOLMOD_D 6 /* solve Dx=b */ #define CHOLMOD_P 7 /* permute x=Px */ #define CHOLMOD_Pt 8 /* permute x=P'x */ CHM_DN cholmod_solve(int, const_CHM_FR, const_CHM_DN, CHM_CM); CHM_SP cholmod_spsolve(int, const_CHM_FR, const_CHM_SP, CHM_CM); int cholmod_sdmult(const_CHM_SP, int, const double*, const double*, const_CHM_DN, CHM_DN Y, CHM_CM); CHM_SP cholmod_ssmult(const_CHM_SP, const_CHM_SP, int, int, int, CHM_CM); int cholmod_factorize(const_CHM_SP, CHM_FR L, CHM_CM); int cholmod_factorize_p(const_CHM_SP, double *beta, int *fset, size_t fsize, CHM_FR L, CHM_CM); CHM_SP cholmod_copy_sparse(const_CHM_SP, CHM_CM); CHM_DN cholmod_copy_dense(const_CHM_DN, CHM_CM); CHM_SP cholmod_aat(const_CHM_SP, int *fset, size_t fsize, int mode, CHM_CM); int cholmod_band_inplace(CHM_SP A, int k1, int k2, int mode, CHM_CM); CHM_SP cholmod_add(const_CHM_SP, const_CHM_SP, double alpha[2], double beta[2], int values, int sorted, CHM_CM); CHM_DN cholmod_allocate_dense(size_t nrow, size_t ncol, size_t d, int xtype, CHM_CM); CHM_FR cholmod_analyze(const_CHM_SP, CHM_CM); CHM_FR cholmod_analyze_p(const_CHM_SP, int *Perm, int *fset, size_t fsize, CHM_CM); int cholmod_change_factor(int to_xtype, int to_ll, int to_super, int to_packed, int to_monotonic, CHM_FR L, CHM_CM); CHM_FR cholmod_copy_factor(const_CHM_FR, CHM_CM); CHM_SP cholmod_factor_to_sparse(const_CHM_FR, CHM_CM); CHM_SP cholmod_dense_to_sparse(const_CHM_DN, int values, CHM_CM); int cholmod_defaults (CHM_CM); CHM_SP cholmod_triplet_to_sparse(const cholmod_triplet*, int nzmax, CHM_CM); CHM_SP cholmod_submatrix(const_CHM_SP, int *rset, int rsize, int *cset, int csize, int values, int sorted, CHM_CM); CHM_TR cholmod_sparse_to_triplet(const_CHM_SP, CHM_CM); CHM_DN 
cholmod_sparse_to_dense(const_CHM_SP, CHM_CM); CHM_TR cholmod_allocate_triplet (size_t nrow, size_t ncol, size_t nzmax, int stype, int xtype, CHM_CM); // from ../../src/CHOLMOD/Include/cholmod_matrixops.h - line 107 : /* scaling modes, selected by the scale input parameter: */ #define CHOLMOD_SCALAR 0 /* A = s*A */ #define CHOLMOD_ROW 1 /* A = diag(s)*A */ #define CHOLMOD_COL 2 /* A = A*diag(s) */ #define CHOLMOD_SYM 3 /* A = diag(s)*A*diag(s) */ int cholmod_scale(const_CHM_DN, int scale, CHM_SP, CHM_CM); // added in the Matrix package - the log of the determinant of the matrix that was factored double chm_factor_ldetL2(const_CHM_FR); #ifdef __cplusplus } #endif #endif /* RcppEigen_CHOLMOD_H */ RcppEigen/inst/include/RcppEigenStubs.h0000644000176200001440000004104512253717461017610 0ustar liggesusers// RcppEigenStubs.cpp: Provide access to compiled CHOLMOD functions in // the Matrix package. // // Copyright (C) 2011 Douglas Bates and Martin Maechler // // This file is part of RcppEigen. // // RcppEigen is free software: you can redistribute it and/or modify it // under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 2 of the License, or // (at your option) any later version. // // RcppEigen is distributed in the hope that it will be useful, but // WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with RcppEigen. If not, see . // Yes, this really is a C++ source file in an include directory. To // use the Cholmod support functions in RcppEigen you should create a // source file, say MyPackage/src/local_stubs.c that contains the // single line #include "RcppEigenStubs.cpp" #include #include #include #ifdef __cplusplus extern "C" { #endif inline CHM_DN as_cholmod_dense(CHM_DN ans, SEXP x) { static CHM_DN(*fun)(CHM_DN,SEXP) = NULL; if(fun == NULL) fun = (CHM_DN(*)(CHM_DN,SEXP)) R_GetCCallable("Matrix", "as_cholmod_dense"); return fun(ans, x); } inline CHM_FR as_cholmod_factor(CHM_FR ans, SEXP x) { static CHM_FR(*fun)(CHM_FR,SEXP) = NULL; if(fun == NULL) fun = (CHM_FR(*)(CHM_FR,SEXP)) R_GetCCallable("Matrix", "as_cholmod_factor"); return fun(ans, x); } inline CHM_SP as_cholmod_sparse(CHM_SP ans, SEXP x, Rboolean check_Udiag, Rboolean sort_in_place) { static CHM_SP(*fun)(CHM_SP,SEXP,Rboolean,Rboolean)= NULL; if(fun == NULL) fun = (CHM_SP(*)(CHM_SP,SEXP,Rboolean,Rboolean)) R_GetCCallable("Matrix", "as_cholmod_sparse"); return fun(ans, x, check_Udiag, sort_in_place); } inline CHM_TR as_cholmod_triplet(CHM_TR ans, SEXP x, Rboolean check_Udiag) { static CHM_TR(*fun)(CHM_TR,SEXP,Rboolean)= NULL; if(fun == NULL) fun = (CHM_TR(*)(CHM_TR,SEXP,Rboolean)) R_GetCCallable("Matrix", "as_cholmod_triplet"); return fun(ans, x, check_Udiag); } inline SEXP Csparse_diagU2N(SEXP x) { static SEXP(*fun)(SEXP) = NULL; if(fun == NULL) fun = (SEXP(*)(SEXP)) R_GetCCallable("Matrix", "Csparse_diagU2N"); return fun(x); } inline SEXP M_chm_factor_to_SEXP(const_CHM_FR f, int dofree) { static SEXP(*fun)(const_CHM_FR,int) = NULL; if(fun == NULL) fun = (SEXP(*)(const_CHM_FR,int)) R_GetCCallable("Matrix", "chm_factor_to_SEXP"); return fun(f, dofree); } inline double M_chm_factor_ldetL2(const_CHM_FR f) { static double(*fun)(const_CHM_FR) = NULL; if(fun == NULL) fun = (double(*)(const_CHM_FR)) R_GetCCallable("Matrix", "chm_factor_ldetL2"); return fun(f); } inline CHM_FR 
M_chm_factor_update(CHM_FR f, const_CHM_SP A, double mult) { static CHM_FR(*fun)(CHM_FR,const_CHM_SP,double) = NULL; if(fun == NULL) fun = (CHM_FR(*)(CHM_FR,const_CHM_SP,double)) R_GetCCallable("Matrix", "chm_factor_update"); return fun(f, A, mult); } inline SEXP M_chm_sparse_to_SEXP(const_CHM_SP a, int dofree, int uploT, int Rkind, const char *diag, SEXP dn) { static SEXP(*fun)(const_CHM_SP,int,int,int,const char*,SEXP) = NULL; if(fun == NULL) fun = (SEXP(*)(const_CHM_SP,int,int,int,const char*,SEXP)) R_GetCCallable("Matrix", "chm_sparse_to_SEXP"); return fun(a, dofree, uploT, Rkind, diag, dn); } inline SEXP M_chm_triplet_to_SEXP(const CHM_TR a, int dofree, int uploT, int Rkind, const char *diag, SEXP dn) { static SEXP(*fun)(const CHM_TR,int,int,int,const char*,SEXP) = NULL; if(fun == NULL) fun = (SEXP(*)(const CHM_TR,int,int,int,const char*,SEXP)) R_GetCCallable("Matrix", "chm_triplet_to_SEXP"); return fun(a, dofree, uploT, Rkind, diag, dn); } inline CHM_SP cholmod_aat(const_CHM_SP A, int *fset, size_t fsize, int mode, CHM_CM Common) { static CHM_SP(*fun)(const_CHM_SP,int*,size_t, int,CHM_CM) = NULL; if(fun == NULL) fun = (CHM_SP(*)(const_CHM_SP,int*,size_t, int,CHM_CM)) R_GetCCallable("Matrix", "cholmod_aat"); return fun(A, fset, fsize, mode, Common); } inline int M_cholmod_band_inplace(CHM_SP A, int k1, int k2, int mode, CHM_CM Common) { static int(*fun)(CHM_SP,int,int,int,CHM_CM) = NULL; if (fun == NULL) fun = (int(*)(CHM_SP,int,int,int,CHM_CM)) R_GetCCallable("Matrix", "cholmod_band_inplace"); return fun(A, k1, k2, mode, Common); } inline CHM_SP cholmod_add(const_CHM_SP A, const_CHM_SP B, double alpha[2], double beta[2], int values, int sorted, CHM_CM Common) { static CHM_SP(*fun)(const_CHM_SP,const_CHM_SP, double*,double*,int,int, CHM_CM) = NULL; if (fun == NULL) fun = (CHM_SP(*)(const_CHM_SP,const_CHM_SP, double*,double*,int,int, CHM_CM)) R_GetCCallable("Matrix", "cholmod_add"); return fun(A, B, alpha, beta, values, sorted, Common); } inline CHM_DN cholmod_allocate_dense(size_t nrow, size_t ncol, size_t d, int xtype, CHM_CM Common) { static CHM_DN(*fun)(size_t,size_t,size_t, int,CHM_CM) = NULL; if (fun == NULL) fun = (CHM_DN(*)(size_t,size_t,size_t, int,CHM_CM)) R_GetCCallable("Matrix", "cholmod_allocate_dense"); return fun(nrow, ncol, d, xtype, Common); } inline CHM_SP cholmod_allocate_sparse(size_t nrow, size_t ncol, size_t nzmax, int sorted, int packed, int stype, int xtype, CHM_CM Common) { static CHM_SP(*fun)(size_t,size_t,size_t,int,int, int,int,CHM_CM) = NULL; if (fun == NULL) fun = (CHM_SP(*) (size_t,size_t,size_t,int,int,int,int,CHM_CM)) R_GetCCallable("Matrix", "cholmod_allocate_sparse"); return fun(nrow,ncol,nzmax,sorted,packed,stype,xtype,Common); } inline CHM_TR cholmod_allocate_triplet(size_t nrow, size_t ncol, size_t nzmax, int stype, int xtype, CHM_CM Common) { static CHM_TR(*fun)(size_t,size_t,size_t, int,int,CHM_CM) = NULL; if (fun == NULL) fun = (CHM_TR(*)(size_t,size_t,size_t,int,int,CHM_CM)) R_GetCCallable("Matrix", "cholmod_allocate_triplet"); return fun(nrow,ncol,nzmax,stype,xtype,Common); } inline CHM_SP cholmod_triplet_to_sparse(const cholmod_triplet* T, int nzmax, CHM_CM Common) { static CHM_SP(*fun)(const cholmod_triplet*,int,CHM_CM) = NULL; if (fun == NULL) fun = (CHM_SP(*)(const cholmod_triplet*,int,CHM_CM)) R_GetCCallable("Matrix", "cholmod_triplet_to_sparse"); return fun(T, nzmax, Common); } inline CHM_TR cholmod_sparse_to_triplet(const_CHM_SP A, CHM_CM Common) { static CHM_TR(*fun)(const_CHM_SP,CHM_CM) = NULL; if (fun == NULL) fun = 
(CHM_TR(*)(const_CHM_SP,CHM_CM)) R_GetCCallable("Matrix", "cholmod_sparse_to_triplet"); return fun(A, Common); } inline CHM_DN cholmod_sparse_to_dense(const_CHM_SP A, CHM_CM Common) { static CHM_DN(*fun)(const_CHM_SP,CHM_CM) = NULL; if (fun == NULL) fun = (CHM_DN(*)(const_CHM_SP,CHM_CM)) R_GetCCallable("Matrix", "cholmod_sparse_to_dense"); return fun(A, Common); } inline CHM_FR cholmod_analyze(const_CHM_SP A, CHM_CM Common) { static CHM_FR(*fun)(const_CHM_SP,CHM_CM) = NULL; if (fun == NULL) fun = (CHM_FR(*)(const_CHM_SP,CHM_CM)) R_GetCCallable("Matrix", "cholmod_analyze"); return fun(A, Common); } inline CHM_FR cholmod_analyze_p(const_CHM_SP A, int *Perm, int *fset, size_t fsize, CHM_CM Common) { static CHM_FR(*fun)(const_CHM_SP,int*,int*,size_t, CHM_CM) = NULL; if (fun == NULL) fun = (CHM_FR(*)(const_CHM_SP,int*,int*, size_t,CHM_CM)) R_GetCCallable("Matrix", "cholmod_analyze_p"); return fun(A, Perm, fset, fsize, Common); } inline CHM_SP cholmod_copy(const_CHM_SP A, int stype, int mode, CHM_CM Common) { static CHM_SP(*fun)(const_CHM_SP,int,int,CHM_CM) = NULL; if (fun == NULL) fun = (CHM_SP(*)(const_CHM_SP,int,int,CHM_CM)) R_GetCCallable("Matrix", "cholmod_copy"); return fun(A, stype, mode, Common); } inline CHM_DN cholmod_copy_dense(const_CHM_DN A, CHM_CM Common) { static CHM_DN(*fun)(const_CHM_DN,CHM_CM) = NULL; if (fun == NULL) fun = (CHM_DN(*)(const_CHM_DN,CHM_CM)) R_GetCCallable("Matrix", "cholmod_copy_dense"); return fun(A, Common); } inline CHM_FR cholmod_copy_factor(const_CHM_FR L, CHM_CM Common) { static CHM_FR(*fun)(const_CHM_FR,CHM_CM) = NULL; if (fun == NULL) fun = (CHM_FR(*)(const_CHM_FR,CHM_CM)) R_GetCCallable("Matrix", "cholmod_copy_factor"); return fun(L, Common); } inline int cholmod_change_factor(int to_xtype, int to_ll, int to_super, int to_packed, int to_monotonic, CHM_FR L, CHM_CM Common) { static int(*fun)(int,int,int,int,int,CHM_FR,CHM_CM) = NULL; if (fun == NULL) fun = (int(*)(int,int,int,int,int,CHM_FR,CHM_CM)) R_GetCCallable("Matrix", "cholmod_change_factor"); return fun(to_xtype, to_ll, to_super, to_packed, to_monotonic, L, Common); } inline CHM_SP cholmod_copy_sparse(const_CHM_SP A, CHM_CM Common) { static CHM_SP(*fun)(const_CHM_SP,CHM_CM) = NULL; if (fun == NULL) fun = (CHM_SP(*)(const_CHM_SP,CHM_CM)) R_GetCCallable("Matrix", "cholmod_copy_sparse"); return fun(A, Common); } inline CHM_SP cholmod_factor_to_sparse(const_CHM_FR L, CHM_CM Common) { static CHM_SP(*fun)(const_CHM_FR,CHM_CM) = NULL; if (fun == NULL) fun = (CHM_SP(*)(const_CHM_FR,CHM_CM)) R_GetCCallable("Matrix", "cholmod_factor_to_sparse"); return fun(L, Common); } inline CHM_SP cholmod_submatrix(const_CHM_SP A, int *rset, int rsize, int *cset, int csize, int values, int sorted, CHM_CM Common) { static CHM_SP(*fun)(const_CHM_SP,int*,int,int*,int, int,int,CHM_CM) = NULL; if (fun == NULL) fun = (CHM_SP(*)(const_CHM_SP,int*,int,int*, int,int,int,CHM_CM)) R_GetCCallable("Matrix", "cholmod_submatrix"); return fun(A, rset, rsize, cset, csize, values, sorted, Common); } inline CHM_SP cholmod_dense_to_sparse(const_CHM_DN X, int values, CHM_CM Common) { static CHM_SP(*fun)(const_CHM_DN,int,CHM_CM) = NULL; if (fun == NULL) fun = (CHM_SP(*)(const_CHM_DN,int,CHM_CM)) R_GetCCallable("Matrix", "cholmod_dense_to_sparse"); return fun(X, values, Common); } inline int cholmod_factorize(const_CHM_SP A, CHM_FR L, CHM_CM Common) { static int(*fun)(const_CHM_SP,CHM_FR,CHM_CM) = NULL; if (fun == NULL) fun = (int(*)(const_CHM_SP,CHM_FR,CHM_CM)) R_GetCCallable("Matrix", "cholmod_factorize"); return fun(A, L, Common); } inline 
int cholmod_factorize_p(const_CHM_SP A, double *beta, int *fset, size_t fsize, CHM_FR L, CHM_CM Common) { static int(*fun)(const_CHM_SP,double*,int*,size_t, CHM_FR,CHM_CM) = NULL; if (fun == NULL) fun = (int(*)(const_CHM_SP,double*,int*,size_t, CHM_FR,CHM_CM)) R_GetCCallable("Matrix", "cholmod_factorize_p"); return fun(A, beta, fset, fsize, L, Common); } inline int cholmod_finish(CHM_CM Common) { static int(*fun)(CHM_CM) = NULL; if (fun == NULL) fun = (int(*)(CHM_CM)) R_GetCCallable("Matrix", "cholmod_finish"); return fun(Common); } inline int cholmod_sort(CHM_SP A, CHM_CM Common) { static int(*fun)(CHM_SP,CHM_CM) = NULL; if (fun == NULL) fun = (int(*)(CHM_SP,CHM_CM)) R_GetCCallable("Matrix", "cholmod_sort"); return fun(A, Common); } inline int cholmod_free_dense(CHM_DN *A, CHM_CM Common) { static int(*fun)(CHM_DN*,CHM_CM) = NULL; if (fun == NULL) fun = (int(*)(CHM_DN*,CHM_CM)) R_GetCCallable("Matrix", "cholmod_free_dense"); return fun(A, Common); } inline int cholmod_free_factor(CHM_FR *L, CHM_CM Common) { static int(*fun)(CHM_FR*,CHM_CM) = NULL; if (fun == NULL) fun = (int(*)(CHM_FR*,CHM_CM)) R_GetCCallable("Matrix", "cholmod_free_factor"); return fun(L, Common); } inline int cholmod_free_sparse(CHM_SP *A, CHM_CM Common) { static int(*fun)(CHM_SP*,CHM_CM) = NULL; if (fun == NULL) fun = (int(*)(CHM_SP*,CHM_CM)) R_GetCCallable("Matrix", "cholmod_free_sparse"); return fun(A, Common); } inline int cholmod_free_triplet(cholmod_triplet **T, CHM_CM Common) { static int(*fun)(cholmod_triplet**,CHM_CM) = NULL; if (fun == NULL) fun = (int(*)(cholmod_triplet**,CHM_CM)) R_GetCCallable("Matrix", "cholmod_free_triplet"); return fun(T, Common); } inline long int cholmod_nnz(const_CHM_SP A, CHM_CM Common) { static long(*fun)(const_CHM_SP,CHM_CM) = NULL; if (fun == NULL) fun = (long(*)(const_CHM_SP,CHM_CM)) R_GetCCallable("Matrix", "cholmod_nnz"); return fun(A, Common); } inline int cholmod_sdmult(const_CHM_SP A, int transpose, const double *alpha, const double *beta, const_CHM_DN X, CHM_DN Y, CHM_CM Common) { static int(*fun)(const_CHM_SP,int,const double*, const double*,const_CHM_DN, CHM_DN,CHM_CM) = NULL; if (fun == NULL) fun = (int(*)(const_CHM_SP,int,const double*, const double*, const_CHM_DN, CHM_DN,CHM_CM)) R_GetCCallable("Matrix", "cholmod_sdmult"); return fun(A, transpose, alpha, beta, X, Y, Common); } inline CHM_SP cholmod_ssmult(const_CHM_SP A, const_CHM_SP B, int stype, int values, int sorted, CHM_CM Common) { static CHM_SP(*fun)(const_CHM_SP,const_CHM_SP, int,int,int,CHM_CM) = NULL; if (fun == NULL) fun = (CHM_SP(*)(const_CHM_SP,const_CHM_SP, int,int,int,CHM_CM)) R_GetCCallable("Matrix", "cholmod_ssmult"); return fun(A, B, stype, values, sorted, Common); } inline CHM_DN cholmod_solve(int sys, const_CHM_FR L, const_CHM_DN B, CHM_CM Common) { static CHM_DN(*fun)(int,const_CHM_FR,const_CHM_DN, CHM_CM) = NULL; if (fun == NULL) fun = (CHM_DN(*)(int,const_CHM_FR,const_CHM_DN, CHM_CM)) R_GetCCallable("Matrix", "cholmod_solve"); return fun(sys, L, B, Common); } inline CHM_SP cholmod_speye(size_t nrow, size_t ncol, int xtype, CHM_CM Common) { static CHM_SP(*fun)(size_t,size_t,int,CHM_CM) = NULL; if (fun == NULL) fun = (CHM_SP(*)(size_t,size_t,int,CHM_CM)) R_GetCCallable("Matrix", "cholmod_speye"); return fun(nrow, ncol, xtype, Common); } inline CHM_SP cholmod_spsolve(int sys, const_CHM_FR L, const_CHM_SP B, CHM_CM Common) { static CHM_SP(*fun)(int,const_CHM_FR, const_CHM_SP, CHM_CM) = NULL; if (fun == NULL) fun = (CHM_SP(*)(int,const_CHM_FR, const_CHM_SP, CHM_CM)) R_GetCCallable("Matrix", 
"cholmod_spsolve"); return fun(sys, L, B, Common); } inline int cholmod_defaults (CHM_CM Common) { static int(*fun)(CHM_CM) = NULL; if (fun == NULL) fun = (int(*)(CHM_CM)) R_GetCCallable("Matrix", "cholmod_defaults"); return fun(Common); } /* extern cholmod_common c; */ inline void R_cholmod_error(int status, const char *file, int line, const char *message) { /* NB: keep in sync with R_cholmod_error(), ../../src/chm_common.c */ if(status < 0) { /* Note: Matrix itself uses CHM_set_common_env, CHM_store_common * and CHM_restore_common to preserve settings through error calls. * Consider defining your own error handler, *and* possibly restoring * *your* version of the cholmod_common that *you* use. */ Rf_error("Cholmod error '%s' at file:%s, line %d", message, file, line); } else Rf_warning("Cholmod warning '%s' at file:%s, line %d", message, file, line); } inline int cholmod_start(CHM_CM Common) { int val; static int(*fun)(CHM_CM) = NULL; if (fun == NULL) fun = (int(*)(CHM_CM)) R_GetCCallable("Matrix", "cholmod_start"); val = fun(Common); /*-- NB: keep in sync with R_cholmod_start() --> ../../src/chm_common.c */ /* do not allow CHOLMOD printing - currently */ Common->print_function = NULL;/* was R_cholmod_printf; /.* Rprintf gives warning */ /* Consider using your own error handler: */ Common->error_handler = R_cholmod_error; return val; } inline CHM_SP cholmod_transpose(const_CHM_SP A, int values, CHM_CM Common) { static CHM_SP(*fun)(const_CHM_SP,int,CHM_CM) = NULL; if (fun == NULL) fun = (CHM_SP(*)(const_CHM_SP,int,CHM_CM)) R_GetCCallable("Matrix", "cholmod_transpose"); return fun(A, values, Common); } inline CHM_SP cholmod_vertcat(const_CHM_SP A, const_CHM_SP B, int values, CHM_CM Common) { static CHM_SP(*fun)(const_CHM_SP,const_CHM_SP,int,CHM_CM) = NULL; if (fun == NULL) fun = (CHM_SP(*)(const_CHM_SP,const_CHM_SP, int, CHM_CM)) R_GetCCallable("Matrix", "cholmod_vertcat"); return fun(A, B, values, Common); } inline int cholmod_scale(const_CHM_DN S, int scale, CHM_SP A, CHM_CM Common) { static int(*fun)(const_CHM_DN,int,CHM_SP, CHM_CM) = NULL; if (fun == NULL) fun = (int(*)(const_CHM_DN,int,CHM_SP, CHM_CM)) R_GetCCallable("Matrix", "cholmod_scale"); return fun(S, scale, A, Common); } #ifdef __cplusplus } #endif RcppEigen/inst/include/unsupported/0000755000176200001440000000000012253717461017126 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/0000755000176200001440000000000012446006401020142 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/AdolcForward0000644000176200001440000001022412446006401022433 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ADLOC_FORWARD #define EIGEN_ADLOC_FORWARD //-------------------------------------------------------------------------------- // // This file provides support for adolc's adouble type in forward mode. // ADOL-C is a C++ automatic differentiation library, // see https://projects.coin-or.org/ADOL-C for more information. // // Note that the maximal number of directions is controlled by // the preprocessor token NUMBER_DIRECTIONS. The default is 2. 
// //-------------------------------------------------------------------------------- #define ADOLC_TAPELESS #ifndef NUMBER_DIRECTIONS # define NUMBER_DIRECTIONS 2 #endif #include // adolc defines some very stupid macros: #if defined(malloc) # undef malloc #endif #if defined(calloc) # undef calloc #endif #if defined(realloc) # undef realloc #endif #include namespace Eigen { /** * \defgroup AdolcForward_Module Adolc forward module * This module provides support for adolc's adouble type in forward mode. * ADOL-C is a C++ automatic differentiation library, * see https://projects.coin-or.org/ADOL-C for more information. * It mainly consists in: * - a struct Eigen::NumTraits specialization * - overloads of internal::* math function for adtl::adouble type. * * Note that the maximal number of directions is controlled by * the preprocessor token NUMBER_DIRECTIONS. The default is 2. * * \code * #include * \endcode */ //@{ } // namespace Eigen // Eigen's require a few additional functions which must be defined in the same namespace // than the custom scalar type own namespace namespace adtl { inline const adouble& conj(const adouble& x) { return x; } inline const adouble& real(const adouble& x) { return x; } inline adouble imag(const adouble&) { return 0.; } inline adouble abs(const adouble& x) { return fabs(x); } inline adouble abs2(const adouble& x) { return x*x; } } namespace Eigen { template<> struct NumTraits : NumTraits { typedef adtl::adouble Real; typedef adtl::adouble NonInteger; typedef adtl::adouble Nested; enum { IsComplex = 0, IsInteger = 0, IsSigned = 1, RequireInitialization = 1, ReadCost = 1, AddCost = 1, MulCost = 1 }; }; template class AdolcForwardJacobian : public Functor { typedef adtl::adouble ActiveScalar; public: AdolcForwardJacobian() : Functor() {} AdolcForwardJacobian(const Functor& f) : Functor(f) {} // forward constructors template AdolcForwardJacobian(const T0& a0) : Functor(a0) {} template AdolcForwardJacobian(const T0& a0, const T1& a1) : Functor(a0, a1) {} template AdolcForwardJacobian(const T0& a0, const T1& a1, const T1& a2) : Functor(a0, a1, a2) {} typedef typename Functor::InputType InputType; typedef typename Functor::ValueType ValueType; typedef typename Functor::JacobianType JacobianType; typedef Matrix ActiveInput; typedef Matrix ActiveValue; void operator() (const InputType& x, ValueType* v, JacobianType* _jac) const { eigen_assert(v!=0); if (!_jac) { Functor::operator()(x, v); return; } JacobianType& jac = *_jac; ActiveInput ax = x.template cast(); ActiveValue av(jac.rows()); for (int j=0; j // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPLINES_MODULE_H #define EIGEN_SPLINES_MODULE_H namespace Eigen { /** * \defgroup Splines_Module Spline and spline fitting module * * This module provides a simple multi-dimensional spline class while * offering most basic functionality to fit a spline to point sets. * * \code * #include * \endcode */ } #include "src/Splines/SplineFwd.h" #include "src/Splines/Spline.h" #include "src/Splines/SplineFitting.h" #endif // EIGEN_SPLINES_MODULE_H RcppEigen/inst/include/unsupported/Eigen/NumericalDiff0000644000176200001440000000335512253717461022616 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. 
// // Copyright (C) 2009 Thomas Capricelli // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_NUMERICALDIFF_MODULE #define EIGEN_NUMERICALDIFF_MODULE #include namespace Eigen { /** * \defgroup NumericalDiff_Module Numerical differentiation module * * \code * #include * \endcode * * See http://en.wikipedia.org/wiki/Numerical_differentiation * * Warning : this should NOT be confused with automatic differentiation, which * is a different method and has its own module in Eigen : \ref * AutoDiff_Module. * * Currently only "Forward" and "Central" schemes are implemented. Those * are basic methods, and there exist some more elaborated way of * computing such approximates. They are implemented using both * proprietary and free software, and usually requires linking to an * external library. It is very easy for you to write a functor * using such software, and the purpose is quite orthogonal to what we * want to achieve with Eigen. * * This is why we will not provide wrappers for every great numerical * differentiation software that exist, but should rather stick with those * basic ones, that still are useful for testing. * * Also, the \ref NonLinearOptimization_Module needs this in order to * provide full features compatibility with the original (c)minpack * package. * */ } //@{ #include "src/NumericalDiff/NumericalDiff.h" //@} #endif // EIGEN_NUMERICALDIFF_MODULE RcppEigen/inst/include/unsupported/Eigen/src/0000755000176200001440000000000012446006401020731 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/src/Splines/0000755000176200001440000000000012253717461022361 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/src/Splines/SplineFitting.h0000644000176200001440000001264112253717461025315 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 20010-2011 Hauke Heibel // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPLINE_FITTING_H #define EIGEN_SPLINE_FITTING_H #include #include "SplineFwd.h" #include namespace Eigen { /** * \brief Computes knot averages. * \ingroup Splines_Module * * The knots are computed as * \f{align*} * u_0 & = \hdots = u_p = 0 \\ * u_{m-p} & = \hdots = u_{m} = 1 \\ * u_{j+p} & = \frac{1}{p}\sum_{i=j}^{j+p-1}\bar{u}_i \quad\quad j=1,\hdots,n-p * \f} * where \f$p\f$ is the degree and \f$m+1\f$ the number knots * of the desired interpolating spline. * * \param[in] parameters The input parameters. During interpolation one for each data point. * \param[in] degree The spline degree which is used during the interpolation. * \param[out] knots The output knot vector. * * \sa Les Piegl and Wayne Tiller, The NURBS book (2nd ed.), 1997, 9.2.1 Global Curve Interpolation to Point Data **/ template void KnotAveraging(const KnotVectorType& parameters, DenseIndex degree, KnotVectorType& knots) { knots.resize(parameters.size()+degree+1); for (DenseIndex j=1; j void ChordLengths(const PointArrayType& pts, KnotVectorType& chord_lengths) { typedef typename KnotVectorType::Scalar Scalar; const DenseIndex n = pts.cols(); // 1. 
compute the column-wise norms chord_lengths.resize(pts.cols()); chord_lengths[0] = 0; chord_lengths.rightCols(n-1) = (pts.array().leftCols(n-1) - pts.array().rightCols(n-1)).matrix().colwise().norm(); // 2. compute the partial sums std::partial_sum(chord_lengths.data(), chord_lengths.data()+n, chord_lengths.data()); // 3. normalize the data chord_lengths /= chord_lengths(n-1); chord_lengths(n-1) = Scalar(1); } /** * \brief Spline fitting methods. * \ingroup Splines_Module **/ template struct SplineFitting { typedef typename SplineType::KnotVectorType KnotVectorType; /** * \brief Fits an interpolating Spline to the given data points. * * \param pts The points for which an interpolating spline will be computed. * \param degree The degree of the interpolating spline. * * \returns A spline interpolating the initially provided points. **/ template static SplineType Interpolate(const PointArrayType& pts, DenseIndex degree); /** * \brief Fits an interpolating Spline to the given data points. * * \param pts The points for which an interpolating spline will be computed. * \param degree The degree of the interpolating spline. * \param knot_parameters The knot parameters for the interpolation. * * \returns A spline interpolating the initially provided points. **/ template static SplineType Interpolate(const PointArrayType& pts, DenseIndex degree, const KnotVectorType& knot_parameters); }; template template SplineType SplineFitting::Interpolate(const PointArrayType& pts, DenseIndex degree, const KnotVectorType& knot_parameters) { typedef typename SplineType::KnotVectorType::Scalar Scalar; typedef typename SplineType::ControlPointVectorType ControlPointVectorType; typedef Matrix MatrixType; KnotVectorType knots; KnotAveraging(knot_parameters, degree, knots); DenseIndex n = pts.cols(); MatrixType A = MatrixType::Zero(n,n); for (DenseIndex i=1; i qr(A); // Here, we are creating a temporary due to an Eigen issue. ControlPointVectorType ctrls = qr.solve(MatrixType(pts.transpose())).transpose(); return SplineType(knots, ctrls); } template template SplineType SplineFitting::Interpolate(const PointArrayType& pts, DenseIndex degree) { KnotVectorType chord_lengths; // knot parameters ChordLengths(pts, chord_lengths); return Interpolate(pts, degree, chord_lengths); } } #endif // EIGEN_SPLINE_FITTING_H RcppEigen/inst/include/unsupported/Eigen/src/Splines/SplineFwd.h0000644000176200001440000000746612253717461024442 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 20010-2011 Hauke Heibel // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPLINES_FWD_H #define EIGEN_SPLINES_FWD_H #include namespace Eigen { template class Spline; template < typename SplineType, int DerivativeOrder = Dynamic > struct SplineTraits {}; /** * \ingroup Splines_Module * \brief Compile-time attributes of the Spline class for Dynamic degree. **/ template struct SplineTraits< Spline<_Scalar, _Dim, _Degree>, Dynamic > { typedef _Scalar Scalar; /*!< The spline curve's scalar type. */ enum { Dimension = _Dim /*!< The spline curve's dimension. */ }; enum { Degree = _Degree /*!< The spline curve's degree. */ }; enum { OrderAtCompileTime = _Degree==Dynamic ? Dynamic : _Degree+1 /*!< The spline curve's order at compile-time. 
*/ }; enum { NumOfDerivativesAtCompileTime = OrderAtCompileTime /*!< The number of derivatives defined for the current spline. */ }; /** \brief The data type used to store non-zero basis functions. */ typedef Array BasisVectorType; /** \brief The data type used to store the values of the basis function derivatives. */ typedef Array BasisDerivativeType; /** \brief The data type used to store the spline's derivative values. */ typedef Array DerivativeType; /** \brief The point type the spline is representing. */ typedef Array PointType; /** \brief The data type used to store knot vectors. */ typedef Array KnotVectorType; /** \brief The data type representing the spline's control points. */ typedef Array ControlPointVectorType; }; /** * \ingroup Splines_Module * \brief Compile-time attributes of the Spline class for fixed degree. * * The traits class inherits all attributes from the SplineTraits of Dynamic degree. **/ template < typename _Scalar, int _Dim, int _Degree, int _DerivativeOrder > struct SplineTraits< Spline<_Scalar, _Dim, _Degree>, _DerivativeOrder > : public SplineTraits< Spline<_Scalar, _Dim, _Degree> > { enum { OrderAtCompileTime = _Degree==Dynamic ? Dynamic : _Degree+1 /*!< The spline curve's order at compile-time. */ }; enum { NumOfDerivativesAtCompileTime = _DerivativeOrder==Dynamic ? Dynamic : _DerivativeOrder+1 /*!< The number of derivatives defined for the current spline. */ }; /** \brief The data type used to store the values of the basis function derivatives. */ typedef Array<_Scalar,Dynamic,Dynamic,RowMajor,NumOfDerivativesAtCompileTime,OrderAtCompileTime> BasisDerivativeType; /** \brief The data type used to store the spline's derivative values. */ typedef Array<_Scalar,_Dim,Dynamic,ColMajor,_Dim,NumOfDerivativesAtCompileTime> DerivativeType; }; /** \brief 2D float B-spline with dynamic degree. */ typedef Spline Spline2f; /** \brief 3D float B-spline with dynamic degree. */ typedef Spline Spline3f; /** \brief 2D double B-spline with dynamic degree. */ typedef Spline Spline2d; /** \brief 3D double B-spline with dynamic degree. */ typedef Spline Spline3d; } #endif // EIGEN_SPLINES_FWD_H RcppEigen/inst/include/unsupported/Eigen/src/Splines/Spline.h0000644000176200001440000004065712253717461024000 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 20010-2011 Hauke Heibel // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPLINE_H #define EIGEN_SPLINE_H #include "SplineFwd.h" namespace Eigen { /** * \ingroup Splines_Module * \class Spline * \brief A class representing multi-dimensional spline curves. * * The class represents B-splines with non-uniform knot vectors. Each control * point of the B-spline is associated with a basis function * \f{align*} * C(u) & = \sum_{i=0}^{n}N_{i,p}(u)P_i * \f} * * \tparam _Scalar The underlying data type (typically float or double) * \tparam _Dim The curve dimension (e.g. 2 or 3) * \tparam _Degree Per default set to Dynamic; could be set to the actual desired * degree for optimization purposes (would result in stack allocation * of several temporary variables). **/ template class Spline { public: typedef _Scalar Scalar; /*!< The spline curve's scalar type. */ enum { Dimension = _Dim /*!< The spline curve's dimension. */ }; enum { Degree = _Degree /*!< The spline curve's degree. 
*/ }; /** \brief The point type the spline is representing. */ typedef typename SplineTraits::PointType PointType; /** \brief The data type used to store knot vectors. */ typedef typename SplineTraits::KnotVectorType KnotVectorType; /** \brief The data type used to store non-zero basis functions. */ typedef typename SplineTraits::BasisVectorType BasisVectorType; /** \brief The data type representing the spline's control points. */ typedef typename SplineTraits::ControlPointVectorType ControlPointVectorType; /** * \brief Creates a (constant) zero spline. * For Splines with dynamic degree, the resulting degree will be 0. **/ Spline() : m_knots(1, (Degree==Dynamic ? 2 : 2*Degree+2)) , m_ctrls(ControlPointVectorType::Zero(2,(Degree==Dynamic ? 1 : Degree+1))) { // in theory this code can go to the initializer list but it will get pretty // much unreadable ... enum { MinDegree = (Degree==Dynamic ? 0 : Degree) }; m_knots.template segment(0) = Array::Zero(); m_knots.template segment(MinDegree+1) = Array::Ones(); } /** * \brief Creates a spline from a knot vector and control points. * \param knots The spline's knot vector. * \param ctrls The spline's control point vector. **/ template Spline(const OtherVectorType& knots, const OtherArrayType& ctrls) : m_knots(knots), m_ctrls(ctrls) {} /** * \brief Copy constructor for splines. * \param spline The input spline. **/ template Spline(const Spline& spline) : m_knots(spline.knots()), m_ctrls(spline.ctrls()) {} /** * \brief Returns the knots of the underlying spline. **/ const KnotVectorType& knots() const { return m_knots; } /** * \brief Returns the knots of the underlying spline. **/ const ControlPointVectorType& ctrls() const { return m_ctrls; } /** * \brief Returns the spline value at a given site \f$u\f$. * * The function returns * \f{align*} * C(u) & = \sum_{i=0}^{n}N_{i,p}P_i * \f} * * \param u Parameter \f$u \in [0;1]\f$ at which the spline is evaluated. * \return The spline value at the given location \f$u\f$. **/ PointType operator()(Scalar u) const; /** * \brief Evaluation of spline derivatives of up-to given order. * * The function returns * \f{align*} * \frac{d^i}{du^i}C(u) & = \sum_{i=0}^{n} \frac{d^i}{du^i} N_{i,p}(u)P_i * \f} * for i ranging between 0 and order. * * \param u Parameter \f$u \in [0;1]\f$ at which the spline derivative is evaluated. * \param order The order up to which the derivatives are computed. **/ typename SplineTraits::DerivativeType derivatives(Scalar u, DenseIndex order) const; /** * \copydoc Spline::derivatives * Using the template version of this function is more efficieent since * temporary objects are allocated on the stack whenever this is possible. **/ template typename SplineTraits::DerivativeType derivatives(Scalar u, DenseIndex order = DerivativeOrder) const; /** * \brief Computes the non-zero basis functions at the given site. * * Splines have local support and a point from their image is defined * by exactly \f$p+1\f$ control points \f$P_i\f$ where \f$p\f$ is the * spline degree. * * This function computes the \f$p+1\f$ non-zero basis function values * for a given parameter value \f$u\f$. It returns * \f{align*}{ * N_{i,p}(u), \hdots, N_{i+p+1,p}(u) * \f} * * \param u Parameter \f$u \in [0;1]\f$ at which the non-zero basis functions * are computed. **/ typename SplineTraits::BasisVectorType basisFunctions(Scalar u) const; /** * \brief Computes the non-zero spline basis function derivatives up to given order. 
* * The function computes * \f{align*}{ * \frac{d^i}{du^i} N_{i,p}(u), \hdots, \frac{d^i}{du^i} N_{i+p+1,p}(u) * \f} * with i ranging from 0 up to the specified order. * * \param u Parameter \f$u \in [0;1]\f$ at which the non-zero basis function * derivatives are computed. * \param order The order up to which the basis function derivatives are computes. **/ typename SplineTraits::BasisDerivativeType basisFunctionDerivatives(Scalar u, DenseIndex order) const; /** * \copydoc Spline::basisFunctionDerivatives * Using the template version of this function is more efficieent since * temporary objects are allocated on the stack whenever this is possible. **/ template typename SplineTraits::BasisDerivativeType basisFunctionDerivatives(Scalar u, DenseIndex order = DerivativeOrder) const; /** * \brief Returns the spline degree. **/ DenseIndex degree() const; /** * \brief Returns the span within the knot vector in which u is falling. * \param u The site for which the span is determined. **/ DenseIndex span(Scalar u) const; /** * \brief Computes the spang within the provided knot vector in which u is falling. **/ static DenseIndex Span(typename SplineTraits::Scalar u, DenseIndex degree, const typename SplineTraits::KnotVectorType& knots); /** * \brief Returns the spline's non-zero basis functions. * * The function computes and returns * \f{align*}{ * N_{i,p}(u), \hdots, N_{i+p+1,p}(u) * \f} * * \param u The site at which the basis functions are computed. * \param degree The degree of the underlying spline. * \param knots The underlying spline's knot vector. **/ static BasisVectorType BasisFunctions(Scalar u, DenseIndex degree, const KnotVectorType& knots); private: KnotVectorType m_knots; /*!< Knot vector. */ ControlPointVectorType m_ctrls; /*!< Control points. */ }; template DenseIndex Spline<_Scalar, _Dim, _Degree>::Span( typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::Scalar u, DenseIndex degree, const typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::KnotVectorType& knots) { // Piegl & Tiller, "The NURBS Book", A2.1 (p. 
68) if (u <= knots(0)) return degree; const Scalar* pos = std::upper_bound(knots.data()+degree-1, knots.data()+knots.size()-degree-1, u); return static_cast( std::distance(knots.data(), pos) - 1 ); } template typename Spline<_Scalar, _Dim, _Degree>::BasisVectorType Spline<_Scalar, _Dim, _Degree>::BasisFunctions( typename Spline<_Scalar, _Dim, _Degree>::Scalar u, DenseIndex degree, const typename Spline<_Scalar, _Dim, _Degree>::KnotVectorType& knots) { typedef typename Spline<_Scalar, _Dim, _Degree>::BasisVectorType BasisVectorType; const DenseIndex p = degree; const DenseIndex i = Spline::Span(u, degree, knots); const KnotVectorType& U = knots; BasisVectorType left(p+1); left(0) = Scalar(0); BasisVectorType right(p+1); right(0) = Scalar(0); VectorBlock(left,1,p) = u - VectorBlock(U,i+1-p,p).reverse(); VectorBlock(right,1,p) = VectorBlock(U,i+1,p) - u; BasisVectorType N(1,p+1); N(0) = Scalar(1); for (DenseIndex j=1; j<=p; ++j) { Scalar saved = Scalar(0); for (DenseIndex r=0; r DenseIndex Spline<_Scalar, _Dim, _Degree>::degree() const { if (_Degree == Dynamic) return m_knots.size() - m_ctrls.cols() - 1; else return _Degree; } template DenseIndex Spline<_Scalar, _Dim, _Degree>::span(Scalar u) const { return Spline::Span(u, degree(), knots()); } template typename Spline<_Scalar, _Dim, _Degree>::PointType Spline<_Scalar, _Dim, _Degree>::operator()(Scalar u) const { enum { Order = SplineTraits::OrderAtCompileTime }; const DenseIndex span = this->span(u); const DenseIndex p = degree(); const BasisVectorType basis_funcs = basisFunctions(u); const Replicate ctrl_weights(basis_funcs); const Block ctrl_pts(ctrls(),0,span-p,Dimension,p+1); return (ctrl_weights * ctrl_pts).rowwise().sum(); } /* --------------------------------------------------------------------------------------------- */ template void derivativesImpl(const SplineType& spline, typename SplineType::Scalar u, DenseIndex order, DerivativeType& der) { enum { Dimension = SplineTraits::Dimension }; enum { Order = SplineTraits::OrderAtCompileTime }; enum { DerivativeOrder = DerivativeType::ColsAtCompileTime }; typedef typename SplineTraits::ControlPointVectorType ControlPointVectorType; typedef typename SplineTraits::BasisDerivativeType BasisDerivativeType; typedef typename BasisDerivativeType::ConstRowXpr BasisDerivativeRowXpr; const DenseIndex p = spline.degree(); const DenseIndex span = spline.span(u); const DenseIndex n = (std::min)(p, order); der.resize(Dimension,n+1); // Retrieve the basis function derivatives up to the desired order... const BasisDerivativeType basis_func_ders = spline.template basisFunctionDerivatives(u, n+1); // ... and perform the linear combinations of the control points. 
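  // (Added explanatory note, not in the original source: for each derivative
  //  order k, row k of basis_func_ders holds the p+1 non-zero basis-function
  //  derivatives at u. The loop below replicates that row across the spatial
  //  dimensions, multiplies it coefficient-wise with the p+1 control points
  //  that are active on the current span, and sums row-wise to obtain column k
  //  of the output derivative matrix.)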
for (DenseIndex der_order=0; der_order ctrl_weights( basis_func_ders.row(der_order) ); const Block ctrl_pts(spline.ctrls(),0,span-p,Dimension,p+1); der.col(der_order) = (ctrl_weights * ctrl_pts).rowwise().sum(); } } template typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::DerivativeType Spline<_Scalar, _Dim, _Degree>::derivatives(Scalar u, DenseIndex order) const { typename SplineTraits< Spline >::DerivativeType res; derivativesImpl(*this, u, order, res); return res; } template template typename SplineTraits< Spline<_Scalar, _Dim, _Degree>, DerivativeOrder >::DerivativeType Spline<_Scalar, _Dim, _Degree>::derivatives(Scalar u, DenseIndex order) const { typename SplineTraits< Spline, DerivativeOrder >::DerivativeType res; derivativesImpl(*this, u, order, res); return res; } template typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::BasisVectorType Spline<_Scalar, _Dim, _Degree>::basisFunctions(Scalar u) const { return Spline::BasisFunctions(u, degree(), knots()); } /* --------------------------------------------------------------------------------------------- */ template void basisFunctionDerivativesImpl(const SplineType& spline, typename SplineType::Scalar u, DenseIndex order, DerivativeType& N_) { enum { Order = SplineTraits::OrderAtCompileTime }; typedef typename SplineTraits::Scalar Scalar; typedef typename SplineTraits::BasisVectorType BasisVectorType; typedef typename SplineTraits::KnotVectorType KnotVectorType; const KnotVectorType& U = spline.knots(); const DenseIndex p = spline.degree(); const DenseIndex span = spline.span(u); const DenseIndex n = (std::min)(p, order); N_.resize(n+1, p+1); BasisVectorType left = BasisVectorType::Zero(p+1); BasisVectorType right = BasisVectorType::Zero(p+1); Matrix ndu(p+1,p+1); double saved, temp; ndu(0,0) = 1.0; DenseIndex j; for (j=1; j<=p; ++j) { left[j] = u-U[span+1-j]; right[j] = U[span+j]-u; saved = 0.0; for (DenseIndex r=0; r(saved+right[r+1] * temp); saved = left[j-r] * temp; } ndu(j,j) = static_cast(saved); } for (j = p; j>=0; --j) N_(0,j) = ndu(j,p); // Compute the derivatives DerivativeType a(n+1,p+1); DenseIndex r=0; for (; r<=p; ++r) { DenseIndex s1,s2; s1 = 0; s2 = 1; // alternate rows in array a a(0,0) = 1.0; // Compute the k-th derivative for (DenseIndex k=1; k<=static_cast(n); ++k) { double d = 0.0; DenseIndex rk,pk,j1,j2; rk = r-k; pk = p-k; if (r>=k) { a(s2,0) = a(s1,0)/ndu(pk+1,rk); d = a(s2,0)*ndu(rk,pk); } if (rk>=-1) j1 = 1; else j1 = -rk; if (r-1 <= pk) j2 = k-1; else j2 = p-r; for (j=j1; j<=j2; ++j) { a(s2,j) = (a(s1,j)-a(s1,j-1))/ndu(pk+1,rk+j); d += a(s2,j)*ndu(rk+j,pk); } if (r<=pk) { a(s2,k) = -a(s1,k-1)/ndu(pk+1,r); d += a(s2,k)*ndu(r,pk); } N_(k,r) = static_cast(d); j = s1; s1 = s2; s2 = j; // Switch rows } } /* Multiply through by the correct factors */ /* (Eq. 
[2.9]) */ r = p; for (DenseIndex k=1; k<=static_cast(n); ++k) { for (DenseIndex j=p; j>=0; --j) N_(k,j) *= r; r *= p-k; } } template typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::BasisDerivativeType Spline<_Scalar, _Dim, _Degree>::basisFunctionDerivatives(Scalar u, DenseIndex order) const { typename SplineTraits< Spline >::BasisDerivativeType der; basisFunctionDerivativesImpl(*this, u, order, der); return der; } template template typename SplineTraits< Spline<_Scalar, _Dim, _Degree>, DerivativeOrder >::BasisDerivativeType Spline<_Scalar, _Dim, _Degree>::basisFunctionDerivatives(Scalar u, DenseIndex order) const { typename SplineTraits< Spline, DerivativeOrder >::BasisDerivativeType der; basisFunctionDerivativesImpl(*this, u, order, der); return der; } } #endif // EIGEN_SPLINE_H RcppEigen/inst/include/unsupported/Eigen/src/Eigenvalues/0000755000176200001440000000000012446006401023200 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/src/Eigenvalues/ArpackSelfAdjointEigenSolver.h0000644000176200001440000007216412446006401031052 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 David Harmon // // Eigen is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public // License as published by the Free Software Foundation; either // version 3 of the License, or (at your option) any later version. // // Alternatively, you can redistribute it and/or // modify it under the terms of the GNU General Public License as // published by the Free Software Foundation; either version 2 of // the License, or (at your option) any later version. // // Eigen is distributed in the hope that it will be useful, but WITHOUT ANY // WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS // FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the // GNU General Public License for more details. // // You should have received a copy of the GNU Lesser General Public // License and a copy of the GNU General Public License along with // Eigen. If not, see . #ifndef EIGEN_ARPACKGENERALIZEDSELFADJOINTEIGENSOLVER_H #define EIGEN_ARPACKGENERALIZEDSELFADJOINTEIGENSOLVER_H #include namespace Eigen { namespace internal { template struct arpack_wrapper; template struct OP; } template, bool BisSPD=false> class ArpackGeneralizedSelfAdjointEigenSolver { public: //typedef typename MatrixSolver::MatrixType MatrixType; /** \brief Scalar type for matrices of type \p MatrixType. */ typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Index Index; /** \brief Real scalar type for \p MatrixType. * * This is just \c Scalar if #Scalar is real (e.g., \c float or * \c Scalar), and the type of the real part of \c Scalar if #Scalar is * complex. */ typedef typename NumTraits::Real RealScalar; /** \brief Type for vector of eigenvalues as returned by eigenvalues(). * * This is a column vector with entries of type #RealScalar. * The length of the vector is the size of \p nbrEigenvalues. */ typedef typename internal::plain_col_type::type RealVectorType; /** \brief Default constructor. * * The default constructor is for cases in which the user intends to * perform decompositions via compute(). 
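    *
    * A rough usage sketch (the sparse matrix \c A and the template argument are
    * illustrative assumptions, not taken from the original documentation):
    * \code
    * Eigen::ArpackGeneralizedSelfAdjointEigenSolver<Eigen::SparseMatrix<double> > eigs;
    * eigs.compute(A, 5, "LM");                  // five largest-magnitude eigenvalues
    * if (eigs.info() == Eigen::Success)
    *   std::cout << eigs.eigenvalues() << std::endl;
    * \endcode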
* */ ArpackGeneralizedSelfAdjointEigenSolver() : m_eivec(), m_eivalues(), m_isInitialized(false), m_eigenvectorsOk(false), m_nbrConverged(0), m_nbrIterations(0) { } /** \brief Constructor; computes generalized eigenvalues of given matrix with respect to another matrix. * * \param[in] A Self-adjoint matrix whose eigenvalues / eigenvectors will * computed. By default, the upper triangular part is used, but can be changed * through the template parameter. * \param[in] B Self-adjoint matrix for the generalized eigenvalue problem. * \param[in] nbrEigenvalues The number of eigenvalues / eigenvectors to compute. * Must be less than the size of the input matrix, or an error is returned. * \param[in] eigs_sigma String containing either "LM", "SM", "LA", or "SA", with * respective meanings to find the largest magnitude , smallest magnitude, * largest algebraic, or smallest algebraic eigenvalues. Alternatively, this * value can contain floating point value in string form, in which case the * eigenvalues closest to this value will be found. * \param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly. * \param[in] tol What tolerance to find the eigenvalues to. Default is 0, which * means machine precision. * * This constructor calls compute(const MatrixType&, const MatrixType&, Index, string, int, RealScalar) * to compute the eigenvalues of the matrix \p A with respect to \p B. The eigenvectors are computed if * \p options equals #ComputeEigenvectors. * */ ArpackGeneralizedSelfAdjointEigenSolver(const MatrixType& A, const MatrixType& B, Index nbrEigenvalues, std::string eigs_sigma="LM", int options=ComputeEigenvectors, RealScalar tol=0.0) : m_eivec(), m_eivalues(), m_isInitialized(false), m_eigenvectorsOk(false), m_nbrConverged(0), m_nbrIterations(0) { compute(A, B, nbrEigenvalues, eigs_sigma, options, tol); } /** \brief Constructor; computes eigenvalues of given matrix. * * \param[in] A Self-adjoint matrix whose eigenvalues / eigenvectors will * computed. By default, the upper triangular part is used, but can be changed * through the template parameter. * \param[in] nbrEigenvalues The number of eigenvalues / eigenvectors to compute. * Must be less than the size of the input matrix, or an error is returned. * \param[in] eigs_sigma String containing either "LM", "SM", "LA", or "SA", with * respective meanings to find the largest magnitude , smallest magnitude, * largest algebraic, or smallest algebraic eigenvalues. Alternatively, this * value can contain floating point value in string form, in which case the * eigenvalues closest to this value will be found. * \param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly. * \param[in] tol What tolerance to find the eigenvalues to. Default is 0, which * means machine precision. * * This constructor calls compute(const MatrixType&, Index, string, int, RealScalar) * to compute the eigenvalues of the matrix \p A. The eigenvectors are computed if * \p options equals #ComputeEigenvectors. * */ ArpackGeneralizedSelfAdjointEigenSolver(const MatrixType& A, Index nbrEigenvalues, std::string eigs_sigma="LM", int options=ComputeEigenvectors, RealScalar tol=0.0) : m_eivec(), m_eivalues(), m_isInitialized(false), m_eigenvectorsOk(false), m_nbrConverged(0), m_nbrIterations(0) { compute(A, nbrEigenvalues, eigs_sigma, options, tol); } /** \brief Computes generalized eigenvalues / eigenvectors of given matrix using the external ARPACK library. * * \param[in] A Selfadjoint matrix whose eigendecomposition is to be computed. 
* \param[in] B Selfadjoint matrix for generalized eigenvalues. * \param[in] nbrEigenvalues The number of eigenvalues / eigenvectors to compute. * Must be less than the size of the input matrix, or an error is returned. * \param[in] eigs_sigma String containing either "LM", "SM", "LA", or "SA", with * respective meanings to find the largest magnitude , smallest magnitude, * largest algebraic, or smallest algebraic eigenvalues. Alternatively, this * value can contain floating point value in string form, in which case the * eigenvalues closest to this value will be found. * \param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly. * \param[in] tol What tolerance to find the eigenvalues to. Default is 0, which * means machine precision. * * \returns Reference to \c *this * * This function computes the generalized eigenvalues of \p A with respect to \p B using ARPACK. The eigenvalues() * function can be used to retrieve them. If \p options equals #ComputeEigenvectors, * then the eigenvectors are also computed and can be retrieved by * calling eigenvectors(). * */ ArpackGeneralizedSelfAdjointEigenSolver& compute(const MatrixType& A, const MatrixType& B, Index nbrEigenvalues, std::string eigs_sigma="LM", int options=ComputeEigenvectors, RealScalar tol=0.0); /** \brief Computes eigenvalues / eigenvectors of given matrix using the external ARPACK library. * * \param[in] A Selfadjoint matrix whose eigendecomposition is to be computed. * \param[in] nbrEigenvalues The number of eigenvalues / eigenvectors to compute. * Must be less than the size of the input matrix, or an error is returned. * \param[in] eigs_sigma String containing either "LM", "SM", "LA", or "SA", with * respective meanings to find the largest magnitude , smallest magnitude, * largest algebraic, or smallest algebraic eigenvalues. Alternatively, this * value can contain floating point value in string form, in which case the * eigenvalues closest to this value will be found. * \param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly. * \param[in] tol What tolerance to find the eigenvalues to. Default is 0, which * means machine precision. * * \returns Reference to \c *this * * This function computes the eigenvalues of \p A using ARPACK. The eigenvalues() * function can be used to retrieve them. If \p options equals #ComputeEigenvectors, * then the eigenvectors are also computed and can be retrieved by * calling eigenvectors(). * */ ArpackGeneralizedSelfAdjointEigenSolver& compute(const MatrixType& A, Index nbrEigenvalues, std::string eigs_sigma="LM", int options=ComputeEigenvectors, RealScalar tol=0.0); /** \brief Returns the eigenvectors of given matrix. * * \returns A const reference to the matrix whose columns are the eigenvectors. * * \pre The eigenvectors have been computed before. * * Column \f$ k \f$ of the returned matrix is an eigenvector corresponding * to eigenvalue number \f$ k \f$ as returned by eigenvalues(). The * eigenvectors are normalized to have (Euclidean) norm equal to one. If * this object was used to solve the eigenproblem for the selfadjoint * matrix \f$ A \f$, then the matrix returned by this function is the * matrix \f$ V \f$ in the eigendecomposition \f$ A V = D V \f$. 
* For the generalized eigenproblem, the matrix returned is the solution \f$ A V = D B V \f$ * * Example: \include SelfAdjointEigenSolver_eigenvectors.cpp * Output: \verbinclude SelfAdjointEigenSolver_eigenvectors.out * * \sa eigenvalues() */ const Matrix& eigenvectors() const { eigen_assert(m_isInitialized && "ArpackGeneralizedSelfAdjointEigenSolver is not initialized."); eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues."); return m_eivec; } /** \brief Returns the eigenvalues of given matrix. * * \returns A const reference to the column vector containing the eigenvalues. * * \pre The eigenvalues have been computed before. * * The eigenvalues are repeated according to their algebraic multiplicity, * so there are as many eigenvalues as rows in the matrix. The eigenvalues * are sorted in increasing order. * * Example: \include SelfAdjointEigenSolver_eigenvalues.cpp * Output: \verbinclude SelfAdjointEigenSolver_eigenvalues.out * * \sa eigenvectors(), MatrixBase::eigenvalues() */ const Matrix& eigenvalues() const { eigen_assert(m_isInitialized && "ArpackGeneralizedSelfAdjointEigenSolver is not initialized."); return m_eivalues; } /** \brief Computes the positive-definite square root of the matrix. * * \returns the positive-definite square root of the matrix * * \pre The eigenvalues and eigenvectors of a positive-definite matrix * have been computed before. * * The square root of a positive-definite matrix \f$ A \f$ is the * positive-definite matrix whose square equals \f$ A \f$. This function * uses the eigendecomposition \f$ A = V D V^{-1} \f$ to compute the * square root as \f$ A^{1/2} = V D^{1/2} V^{-1} \f$. * * Example: \include SelfAdjointEigenSolver_operatorSqrt.cpp * Output: \verbinclude SelfAdjointEigenSolver_operatorSqrt.out * * \sa operatorInverseSqrt(), * \ref MatrixFunctions_Module "MatrixFunctions Module" */ Matrix operatorSqrt() const { eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized."); eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues."); return m_eivec * m_eivalues.cwiseSqrt().asDiagonal() * m_eivec.adjoint(); } /** \brief Computes the inverse square root of the matrix. * * \returns the inverse positive-definite square root of the matrix * * \pre The eigenvalues and eigenvectors of a positive-definite matrix * have been computed before. * * This function uses the eigendecomposition \f$ A = V D V^{-1} \f$ to * compute the inverse square root as \f$ V D^{-1/2} V^{-1} \f$. This is * cheaper than first computing the square root with operatorSqrt() and * then its inverse with MatrixBase::inverse(). * * Example: \include SelfAdjointEigenSolver_operatorInverseSqrt.cpp * Output: \verbinclude SelfAdjointEigenSolver_operatorInverseSqrt.out * * \sa operatorSqrt(), MatrixBase::inverse(), * \ref MatrixFunctions_Module "MatrixFunctions Module" */ Matrix operatorInverseSqrt() const { eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized."); eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues."); return m_eivec * m_eivalues.cwiseInverse().cwiseSqrt().asDiagonal() * m_eivec.adjoint(); } /** \brief Reports whether previous computation was successful. * * \returns \c Success if computation was succesful, \c NoConvergence otherwise. 
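    *
    * A typical post-compute check might look like this (illustrative sketch only):
    * \code
    * if (eigs.info() != Eigen::Success)
    *   std::cerr << "ARPACK solver did not succeed\n";   // NoConvergence, NumericalIssue or InvalidInput
    * \endcode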
*/ ComputationInfo info() const { eigen_assert(m_isInitialized && "ArpackGeneralizedSelfAdjointEigenSolver is not initialized."); return m_info; } size_t getNbrConvergedEigenValues() const { return m_nbrConverged; } size_t getNbrIterations() const { return m_nbrIterations; } protected: Matrix m_eivec; Matrix m_eivalues; ComputationInfo m_info; bool m_isInitialized; bool m_eigenvectorsOk; size_t m_nbrConverged; size_t m_nbrIterations; }; template ArpackGeneralizedSelfAdjointEigenSolver& ArpackGeneralizedSelfAdjointEigenSolver ::compute(const MatrixType& A, Index nbrEigenvalues, std::string eigs_sigma, int options, RealScalar tol) { MatrixType B(0,0); compute(A, B, nbrEigenvalues, eigs_sigma, options, tol); return *this; } template ArpackGeneralizedSelfAdjointEigenSolver& ArpackGeneralizedSelfAdjointEigenSolver ::compute(const MatrixType& A, const MatrixType& B, Index nbrEigenvalues, std::string eigs_sigma, int options, RealScalar tol) { eigen_assert(A.cols() == A.rows()); eigen_assert(B.cols() == B.rows()); eigen_assert(B.rows() == 0 || A.cols() == B.rows()); eigen_assert((options &~ (EigVecMask | GenEigMask)) == 0 && (options & EigVecMask) != EigVecMask && "invalid option parameter"); bool isBempty = (B.rows() == 0) || (B.cols() == 0); // For clarity, all parameters match their ARPACK name // // Always 0 on the first call // int ido = 0; int n = (int)A.cols(); // User options: "LA", "SA", "SM", "LM", "BE" // char whch[3] = "LM"; // Specifies the shift if iparam[6] = { 3, 4, 5 }, not used if iparam[6] = { 1, 2 } // RealScalar sigma = 0.0; if (eigs_sigma.length() >= 2 && isalpha(eigs_sigma[0]) && isalpha(eigs_sigma[1])) { eigs_sigma[0] = toupper(eigs_sigma[0]); eigs_sigma[1] = toupper(eigs_sigma[1]); // In the following special case we're going to invert the problem, since solving // for larger magnitude is much much faster // i.e., if 'SM' is specified, we're going to really use 'LM', the default // if (eigs_sigma.substr(0,2) != "SM") { whch[0] = eigs_sigma[0]; whch[1] = eigs_sigma[1]; } } else { eigen_assert(false && "Specifying clustered eigenvalues is not yet supported!"); // If it's not scalar values, then the user may be explicitly // specifying the sigma value to cluster the evs around // sigma = atof(eigs_sigma.c_str()); // If atof fails, it returns 0.0, which is a fine default // } // "I" means normal eigenvalue problem, "G" means generalized // char bmat[2] = "I"; if (eigs_sigma.substr(0,2) == "SM" || !(isalpha(eigs_sigma[0]) && isalpha(eigs_sigma[1])) || (!isBempty && !BisSPD)) bmat[0] = 'G'; // Now we determine the mode to use // int mode = (bmat[0] == 'G') + 1; if (eigs_sigma.substr(0,2) == "SM" || !(isalpha(eigs_sigma[0]) && isalpha(eigs_sigma[1]))) { // We're going to use shift-and-invert mode, and basically find // the largest eigenvalues of the inverse operator // mode = 3; } // The user-specified number of eigenvalues/vectors to compute // int nev = (int)nbrEigenvalues; // Allocate space for ARPACK to store the residual // Scalar *resid = new Scalar[n]; // Number of Lanczos vectors, must satisfy nev < ncv <= n // Note that this indicates that nev != n, and we cannot compute // all eigenvalues of a mtrix // int ncv = std::min(std::max(2*nev, 20), n); // The working n x ncv matrix, also store the final eigenvectors (if computed) // Scalar *v = new Scalar[n*ncv]; int ldv = n; // Working space // Scalar *workd = new Scalar[3*n]; int lworkl = ncv*ncv+8*ncv; // Must be at least this length Scalar *workl = new Scalar[lworkl]; int *iparam= new int[11]; iparam[0] = 1; // 1 means we 
let ARPACK perform the shifts, 0 means we'd have to do it iparam[2] = std::max(300, (int)std::ceil(2*n/std::max(ncv,1))); iparam[6] = mode; // The mode, 1 is standard ev problem, 2 for generalized ev, 3 for shift-and-invert // Used during reverse communicate to notify where arrays start // int *ipntr = new int[11]; // Error codes are returned in here, initial value of 0 indicates a random initial // residual vector is used, any other values means resid contains the initial residual // vector, possibly from a previous run // int info = 0; Scalar scale = 1.0; //if (!isBempty) //{ //Scalar scale = B.norm() / std::sqrt(n); //scale = std::pow(2, std::floor(std::log(scale+1))); ////M /= scale; //for (size_t i=0; i<(size_t)B.outerSize(); i++) // for (typename MatrixType::InnerIterator it(B, i); it; ++it) // it.valueRef() /= scale; //} MatrixSolver OP; if (mode == 1 || mode == 2) { if (!isBempty) OP.compute(B); } else if (mode == 3) { if (sigma == 0.0) { OP.compute(A); } else { // Note: We will never enter here because sigma must be 0.0 // if (isBempty) { MatrixType AminusSigmaB(A); for (Index i=0; i::saupd(&ido, bmat, &n, whch, &nev, &tol, resid, &ncv, v, &ldv, iparam, ipntr, workd, workl, &lworkl, &info); if (ido == -1 || ido == 1) { Scalar *in = workd + ipntr[0] - 1; Scalar *out = workd + ipntr[1] - 1; if (ido == 1 && mode != 2) { Scalar *out2 = workd + ipntr[2] - 1; if (isBempty || mode == 1) Matrix::Map(out2, n) = Matrix::Map(in, n); else Matrix::Map(out2, n) = B * Matrix::Map(in, n); in = workd + ipntr[2] - 1; } if (mode == 1) { if (isBempty) { // OP = A // Matrix::Map(out, n) = A * Matrix::Map(in, n); } else { // OP = L^{-1}AL^{-T} // internal::OP::applyOP(OP, A, n, in, out); } } else if (mode == 2) { if (ido == 1) Matrix::Map(in, n) = A * Matrix::Map(in, n); // OP = B^{-1} A // Matrix::Map(out, n) = OP.solve(Matrix::Map(in, n)); } else if (mode == 3) { // OP = (A-\sigmaB)B (\sigma could be 0, and B could be I) // The B * in is already computed and stored at in if ido == 1 // if (ido == 1 || isBempty) Matrix::Map(out, n) = OP.solve(Matrix::Map(in, n)); else Matrix::Map(out, n) = OP.solve(B * Matrix::Map(in, n)); } } else if (ido == 2) { Scalar *in = workd + ipntr[0] - 1; Scalar *out = workd + ipntr[1] - 1; if (isBempty || mode == 1) Matrix::Map(out, n) = Matrix::Map(in, n); else Matrix::Map(out, n) = B * Matrix::Map(in, n); } } while (ido != 99); if (info == 1) m_info = NoConvergence; else if (info == 3) m_info = NumericalIssue; else if (info < 0) m_info = InvalidInput; else if (info != 0) eigen_assert(false && "Unknown ARPACK return value!"); else { // Do we compute eigenvectors or not? 
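      // (Added explanatory note, not in the original source: this branch is only
      //  reached when *saupd returned info == 0, i.e. the Lanczos iteration
      //  converged; *seupd is called below to extract the converged eigenvalues
      //  and, if requested, the eigenvectors from the computed factorization.)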
// int rvec = (options & ComputeEigenvectors) == ComputeEigenvectors; // "A" means "All", use "S" to choose specific eigenvalues (not yet supported in ARPACK)) // char howmny[2] = "A"; // if howmny == "S", specifies the eigenvalues to compute (not implemented in ARPACK) // int *select = new int[ncv]; // Final eigenvalues // m_eivalues.resize(nev, 1); internal::arpack_wrapper::seupd(&rvec, howmny, select, m_eivalues.data(), v, &ldv, &sigma, bmat, &n, whch, &nev, &tol, resid, &ncv, v, &ldv, iparam, ipntr, workd, workl, &lworkl, &info); if (info == -14) m_info = NoConvergence; else if (info != 0) m_info = InvalidInput; else { if (rvec) { m_eivec.resize(A.rows(), nev); for (int i=0; i::project(OP, n, nev, m_eivec.data()); m_eigenvectorsOk = true; } m_nbrIterations = iparam[2]; m_nbrConverged = iparam[4]; m_info = Success; } delete select; } delete v; delete iparam; delete ipntr; delete workd; delete workl; delete resid; m_isInitialized = true; return *this; } // Single precision // extern "C" void ssaupd_(int *ido, char *bmat, int *n, char *which, int *nev, float *tol, float *resid, int *ncv, float *v, int *ldv, int *iparam, int *ipntr, float *workd, float *workl, int *lworkl, int *info); extern "C" void sseupd_(int *rvec, char *All, int *select, float *d, float *z, int *ldz, float *sigma, char *bmat, int *n, char *which, int *nev, float *tol, float *resid, int *ncv, float *v, int *ldv, int *iparam, int *ipntr, float *workd, float *workl, int *lworkl, int *ierr); // Double precision // extern "C" void dsaupd_(int *ido, char *bmat, int *n, char *which, int *nev, double *tol, double *resid, int *ncv, double *v, int *ldv, int *iparam, int *ipntr, double *workd, double *workl, int *lworkl, int *info); extern "C" void dseupd_(int *rvec, char *All, int *select, double *d, double *z, int *ldz, double *sigma, char *bmat, int *n, char *which, int *nev, double *tol, double *resid, int *ncv, double *v, int *ldv, int *iparam, int *ipntr, double *workd, double *workl, int *lworkl, int *ierr); namespace internal { template struct arpack_wrapper { static inline void saupd(int *ido, char *bmat, int *n, char *which, int *nev, RealScalar *tol, Scalar *resid, int *ncv, Scalar *v, int *ldv, int *iparam, int *ipntr, Scalar *workd, Scalar *workl, int *lworkl, int *info) { EIGEN_STATIC_ASSERT(!NumTraits::IsComplex, NUMERIC_TYPE_MUST_BE_REAL) } static inline void seupd(int *rvec, char *All, int *select, Scalar *d, Scalar *z, int *ldz, RealScalar *sigma, char *bmat, int *n, char *which, int *nev, RealScalar *tol, Scalar *resid, int *ncv, Scalar *v, int *ldv, int *iparam, int *ipntr, Scalar *workd, Scalar *workl, int *lworkl, int *ierr) { EIGEN_STATIC_ASSERT(!NumTraits::IsComplex, NUMERIC_TYPE_MUST_BE_REAL) } }; template <> struct arpack_wrapper { static inline void saupd(int *ido, char *bmat, int *n, char *which, int *nev, float *tol, float *resid, int *ncv, float *v, int *ldv, int *iparam, int *ipntr, float *workd, float *workl, int *lworkl, int *info) { ssaupd_(ido, bmat, n, which, nev, tol, resid, ncv, v, ldv, iparam, ipntr, workd, workl, lworkl, info); } static inline void seupd(int *rvec, char *All, int *select, float *d, float *z, int *ldz, float *sigma, char *bmat, int *n, char *which, int *nev, float *tol, float *resid, int *ncv, float *v, int *ldv, int *iparam, int *ipntr, float *workd, float *workl, int *lworkl, int *ierr) { sseupd_(rvec, All, select, d, z, ldz, sigma, bmat, n, which, nev, tol, resid, ncv, v, ldv, iparam, ipntr, workd, workl, lworkl, ierr); } }; template <> struct arpack_wrapper { static 
inline void saupd(int *ido, char *bmat, int *n, char *which, int *nev, double *tol, double *resid, int *ncv, double *v, int *ldv, int *iparam, int *ipntr, double *workd, double *workl, int *lworkl, int *info) { dsaupd_(ido, bmat, n, which, nev, tol, resid, ncv, v, ldv, iparam, ipntr, workd, workl, lworkl, info); } static inline void seupd(int *rvec, char *All, int *select, double *d, double *z, int *ldz, double *sigma, char *bmat, int *n, char *which, int *nev, double *tol, double *resid, int *ncv, double *v, int *ldv, int *iparam, int *ipntr, double *workd, double *workl, int *lworkl, int *ierr) { dseupd_(rvec, All, select, d, v, ldv, sigma, bmat, n, which, nev, tol, resid, ncv, v, ldv, iparam, ipntr, workd, workl, lworkl, ierr); } }; template struct OP { static inline void applyOP(MatrixSolver &OP, const MatrixType &A, int n, Scalar *in, Scalar *out); static inline void project(MatrixSolver &OP, int n, int k, Scalar *vecs); }; template struct OP { static inline void applyOP(MatrixSolver &OP, const MatrixType &A, int n, Scalar *in, Scalar *out) { // OP = L^{-1} A L^{-T} (B = LL^T) // // First solve L^T out = in // Matrix::Map(out, n) = OP.matrixU().solve(Matrix::Map(in, n)); Matrix::Map(out, n) = OP.permutationPinv() * Matrix::Map(out, n); // Then compute out = A out // Matrix::Map(out, n) = A * Matrix::Map(out, n); // Then solve L out = out // Matrix::Map(out, n) = OP.permutationP() * Matrix::Map(out, n); Matrix::Map(out, n) = OP.matrixL().solve(Matrix::Map(out, n)); } static inline void project(MatrixSolver &OP, int n, int k, Scalar *vecs) { // Solve L^T out = in // Matrix::Map(vecs, n, k) = OP.matrixU().solve(Matrix::Map(vecs, n, k)); Matrix::Map(vecs, n, k) = OP.permutationPinv() * Matrix::Map(vecs, n, k); } }; template struct OP { static inline void applyOP(MatrixSolver &OP, const MatrixType &A, int n, Scalar *in, Scalar *out) { eigen_assert(false && "Should never be in here..."); } static inline void project(MatrixSolver &OP, int n, int k, Scalar *vecs) { eigen_assert(false && "Should never be in here..."); } }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_ARPACKSELFADJOINTEIGENSOLVER_H RcppEigen/inst/include/unsupported/Eigen/src/NumericalDiff/0000755000176200001440000000000012253717461023454 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h0000644000176200001440000000766412253717461026352 0ustar liggesusers// -*- coding: utf-8 // vim: set fileencoding=utf-8 // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Thomas Capricelli // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_NUMERICAL_DIFF_H #define EIGEN_NUMERICAL_DIFF_H namespace Eigen { enum NumericalDiffMode { Forward, Central }; /** * This class allows you to add a method df() to your functor, which will * use numerical differentiation to compute an approximate of the * derivative for the functor. Of course, if you have an analytical form * for the derivative, you should rather implement df() by yourself. * * More information on * http://en.wikipedia.org/wiki/Numerical_differentiation * * Currently only "Forward" and "Central" scheme are implemented. 
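 *
 * A minimal usage sketch (the functor type \c MyFunctor is an assumption; it is
 * expected to provide the Scalar/InputType/ValueType/JacobianType typedefs,
 * \c inputs(), \c values() and \c operator() in the style of the functors used
 * elsewhere in this module):
 * \code
 * MyFunctor functor;
 * Eigen::NumericalDiff<MyFunctor, Eigen::Central> numDiff(functor);
 * Eigen::VectorXd x(functor.inputs());                      // evaluation point
 * Eigen::MatrixXd jac(functor.values(), functor.inputs());
 * int nfev = numDiff.df(x, jac);                            // fills jac, returns number of functor evaluations
 * \endcode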
*/ template class NumericalDiff : public _Functor { public: typedef _Functor Functor; typedef typename Functor::Scalar Scalar; typedef typename Functor::InputType InputType; typedef typename Functor::ValueType ValueType; typedef typename Functor::JacobianType JacobianType; NumericalDiff(Scalar _epsfcn=0.) : Functor(), epsfcn(_epsfcn) {} NumericalDiff(const Functor& f, Scalar _epsfcn=0.) : Functor(f), epsfcn(_epsfcn) {} // forward constructors template NumericalDiff(const T0& a0) : Functor(a0), epsfcn(0) {} template NumericalDiff(const T0& a0, const T1& a1) : Functor(a0, a1), epsfcn(0) {} template NumericalDiff(const T0& a0, const T1& a1, const T2& a2) : Functor(a0, a1, a2), epsfcn(0) {} enum { InputsAtCompileTime = Functor::InputsAtCompileTime, ValuesAtCompileTime = Functor::ValuesAtCompileTime }; /** * return the number of evaluation of functor */ int df(const InputType& _x, JacobianType &jac) const { using std::sqrt; using std::abs; /* Local variables */ Scalar h; int nfev=0; const typename InputType::Index n = _x.size(); const Scalar eps = sqrt(((std::max)(epsfcn,NumTraits::epsilon() ))); ValueType val1, val2; InputType x = _x; // TODO : we should do this only if the size is not already known val1.resize(Functor::values()); val2.resize(Functor::values()); // initialization switch(mode) { case Forward: // compute f(x) Functor::operator()(x, val1); nfev++; break; case Central: // do nothing break; default: eigen_assert(false); }; // Function Body for (int j = 0; j < n; ++j) { h = eps * abs(x[j]); if (h == 0.) { h = eps; } switch(mode) { case Forward: x[j] += h; Functor::operator()(x, val2); nfev++; x[j] = _x[j]; jac.col(j) = (val2-val1)/h; break; case Central: x[j] += h; Functor::operator()(x, val2); nfev++; x[j] -= 2*h; Functor::operator()(x, val1); nfev++; x[j] = _x[j]; jac.col(j) = (val2-val1)/(2*h); break; default: eigen_assert(false); }; } return nfev; } private: Scalar epsfcn; NumericalDiff& operator=(const NumericalDiff&); }; } // end namespace Eigen //vim: ai ts=4 sts=4 et sw=4 #endif // EIGEN_NUMERICAL_DIFF_H RcppEigen/inst/include/unsupported/Eigen/src/KroneckerProduct/0000755000176200001440000000000012253717461024230 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h0000644000176200001440000002005512253717461031062 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2011 Kolja Brix // Copyright (C) 2011 Andreas Platen // Copyright (C) 2012 Chen-Pang He // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef KRONECKER_TENSOR_PRODUCT_H #define KRONECKER_TENSOR_PRODUCT_H namespace Eigen { template class SparseMatrix; /*! * \brief Kronecker tensor product helper class for dense matrices * * This class is the return value of kroneckerProduct(MatrixBase, * MatrixBase). Use the function rather than construct this class * directly to avoid specifying template prarameters. * * \tparam Lhs Type of the left-hand side, a matrix expression. * \tparam Rhs Type of the rignt-hand side, a matrix expression. */ template class KroneckerProduct : public ReturnByValue > { private: typedef ReturnByValue Base; typedef typename Base::Scalar Scalar; typedef typename Base::Index Index; public: /*! \brief Constructor. */ KroneckerProduct(const Lhs& A, const Rhs& B) : m_A(A), m_B(B) {} /*! 
\brief Evaluate the Kronecker tensor product. */ template void evalTo(Dest& dst) const; inline Index rows() const { return m_A.rows() * m_B.rows(); } inline Index cols() const { return m_A.cols() * m_B.cols(); } Scalar coeff(Index row, Index col) const { return m_A.coeff(row / m_B.rows(), col / m_B.cols()) * m_B.coeff(row % m_B.rows(), col % m_B.cols()); } Scalar coeff(Index i) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(KroneckerProduct); return m_A.coeff(i / m_A.size()) * m_B.coeff(i % m_A.size()); } private: typename Lhs::Nested m_A; typename Rhs::Nested m_B; }; /*! * \brief Kronecker tensor product helper class for sparse matrices * * If at least one of the operands is a sparse matrix expression, * then this class is returned and evaluates into a sparse matrix. * * This class is the return value of kroneckerProduct(EigenBase, * EigenBase). Use the function rather than construct this class * directly to avoid specifying template prarameters. * * \tparam Lhs Type of the left-hand side, a matrix expression. * \tparam Rhs Type of the rignt-hand side, a matrix expression. */ template class KroneckerProductSparse : public EigenBase > { private: typedef typename internal::traits::Index Index; public: /*! \brief Constructor. */ KroneckerProductSparse(const Lhs& A, const Rhs& B) : m_A(A), m_B(B) {} /*! \brief Evaluate the Kronecker tensor product. */ template void evalTo(Dest& dst) const; inline Index rows() const { return m_A.rows() * m_B.rows(); } inline Index cols() const { return m_A.cols() * m_B.cols(); } template operator SparseMatrix() { SparseMatrix result; evalTo(result.derived()); return result; } private: typename Lhs::Nested m_A; typename Rhs::Nested m_B; }; template template void KroneckerProduct::evalTo(Dest& dst) const { const int BlockRows = Rhs::RowsAtCompileTime, BlockCols = Rhs::ColsAtCompileTime; const Index Br = m_B.rows(), Bc = m_B.cols(); for (Index i=0; i < m_A.rows(); ++i) for (Index j=0; j < m_A.cols(); ++j) Block(dst,i*Br,j*Bc,Br,Bc) = m_A.coeff(i,j) * m_B; } template template void KroneckerProductSparse::evalTo(Dest& dst) const { const Index Br = m_B.rows(), Bc = m_B.cols(); dst.resize(rows(),cols()); dst.resizeNonZeros(0); dst.reserve(m_A.nonZeros() * m_B.nonZeros()); for (Index kA=0; kA < m_A.outerSize(); ++kA) { for (Index kB=0; kB < m_B.outerSize(); ++kB) { for (typename Lhs::InnerIterator itA(m_A,kA); itA; ++itA) { for (typename Rhs::InnerIterator itB(m_B,kB); itB; ++itB) { const Index i = itA.row() * Br + itB.row(), j = itA.col() * Bc + itB.col(); dst.insert(i,j) = itA.value() * itB.value(); } } } } } namespace internal { template struct traits > { typedef typename remove_all<_Lhs>::type Lhs; typedef typename remove_all<_Rhs>::type Rhs; typedef typename scalar_product_traits::ReturnType Scalar; enum { Rows = size_at_compile_time::RowsAtCompileTime, traits::RowsAtCompileTime>::ret, Cols = size_at_compile_time::ColsAtCompileTime, traits::ColsAtCompileTime>::ret, MaxRows = size_at_compile_time::MaxRowsAtCompileTime, traits::MaxRowsAtCompileTime>::ret, MaxCols = size_at_compile_time::MaxColsAtCompileTime, traits::MaxColsAtCompileTime>::ret, CoeffReadCost = Lhs::CoeffReadCost + Rhs::CoeffReadCost + NumTraits::MulCost }; typedef Matrix ReturnType; }; template struct traits > { typedef MatrixXpr XprKind; typedef typename remove_all<_Lhs>::type Lhs; typedef typename remove_all<_Rhs>::type Rhs; typedef typename scalar_product_traits::ReturnType Scalar; typedef typename promote_storage_type::StorageKind, typename traits::StorageKind>::ret StorageKind; typedef typename 
promote_index_type::type Index; enum { LhsFlags = Lhs::Flags, RhsFlags = Rhs::Flags, RowsAtCompileTime = size_at_compile_time::RowsAtCompileTime, traits::RowsAtCompileTime>::ret, ColsAtCompileTime = size_at_compile_time::ColsAtCompileTime, traits::ColsAtCompileTime>::ret, MaxRowsAtCompileTime = size_at_compile_time::MaxRowsAtCompileTime, traits::MaxRowsAtCompileTime>::ret, MaxColsAtCompileTime = size_at_compile_time::MaxColsAtCompileTime, traits::MaxColsAtCompileTime>::ret, EvalToRowMajor = (LhsFlags & RhsFlags & RowMajorBit), RemovedBits = ~(EvalToRowMajor ? 0 : RowMajorBit), Flags = ((LhsFlags | RhsFlags) & HereditaryBits & RemovedBits) | EvalBeforeNestingBit | EvalBeforeAssigningBit, CoeffReadCost = Dynamic }; }; } // end namespace internal /*! * \ingroup KroneckerProduct_Module * * Computes Kronecker tensor product of two dense matrices * * \warning If you want to replace a matrix by its Kronecker product * with some matrix, do \b NOT do this: * \code * A = kroneckerProduct(A,B); // bug!!! caused by aliasing effect * \endcode * instead, use eval() to work around this: * \code * A = kroneckerProduct(A,B).eval(); * \endcode * * \param a Dense matrix a * \param b Dense matrix b * \return Kronecker tensor product of a and b */ template KroneckerProduct kroneckerProduct(const MatrixBase& a, const MatrixBase& b) { return KroneckerProduct(a.derived(), b.derived()); } /*! * \ingroup KroneckerProduct_Module * * Computes Kronecker tensor product of two matrices, at least one of * which is sparse * * \param a Dense/sparse matrix a * \param b Dense/sparse matrix b * \return Kronecker tensor product of a and b, stored in a sparse * matrix */ template KroneckerProductSparse kroneckerProduct(const EigenBase& a, const EigenBase& b) { return KroneckerProductSparse(a.derived(), b.derived()); } } // end namespace Eigen #endif // KRONECKER_TENSOR_PRODUCT_H RcppEigen/inst/include/unsupported/Eigen/src/LevenbergMarquardt/0000755000176200001440000000000012253717461024536 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h0000644000176200001440000003050712253717461030506 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Thomas Capricelli // Copyright (C) 2012 Desire Nuentsa // // The algorithm of this class initially comes from MINPACK whose original authors are: // Copyright Jorge More - Argonne National Laboratory // Copyright Burt Garbow - Argonne National Laboratory // Copyright Ken Hillstrom - Argonne National Laboratory // // This Source Code Form is subject to the terms of the Minpack license // (a BSD-like license) described in the campaigned CopyrightMINPACK.txt file. // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_LEVENBERGMARQUARDT_H #define EIGEN_LEVENBERGMARQUARDT_H namespace Eigen { namespace LevenbergMarquardtSpace { enum Status { NotStarted = -2, Running = -1, ImproperInputParameters = 0, RelativeReductionTooSmall = 1, RelativeErrorTooSmall = 2, RelativeErrorAndReductionTooSmall = 3, CosinusTooSmall = 4, TooManyFunctionEvaluation = 5, FtolTooSmall = 6, XtolTooSmall = 7, GtolTooSmall = 8, UserAsked = 9 }; } template struct DenseFunctor { typedef _Scalar Scalar; enum { InputsAtCompileTime = NX, ValuesAtCompileTime = NY }; typedef Matrix InputType; typedef Matrix ValueType; typedef Matrix JacobianType; typedef ColPivHouseholderQR QRSolver; const int m_inputs, m_values; DenseFunctor() : m_inputs(InputsAtCompileTime), m_values(ValuesAtCompileTime) {} DenseFunctor(int inputs, int values) : m_inputs(inputs), m_values(values) {} int inputs() const { return m_inputs; } int values() const { return m_values; } //int operator()(const InputType &x, ValueType& fvec) { } // should be defined in derived classes //int df(const InputType &x, JacobianType& fjac) { } // should be defined in derived classes }; template struct SparseFunctor { typedef _Scalar Scalar; typedef _Index Index; typedef Matrix InputType; typedef Matrix ValueType; typedef SparseMatrix JacobianType; typedef SparseQR > QRSolver; enum { InputsAtCompileTime = Dynamic, ValuesAtCompileTime = Dynamic }; SparseFunctor(int inputs, int values) : m_inputs(inputs), m_values(values) {} int inputs() const { return m_inputs; } int values() const { return m_values; } const int m_inputs, m_values; //int operator()(const InputType &x, ValueType& fvec) { } // to be defined in the functor //int df(const InputType &x, JacobianType& fjac) { } // to be defined in the functor if no automatic differentiation }; namespace internal { template void lmpar2(const QRSolver &qr, const VectorType &diag, const VectorType &qtb, typename VectorType::Scalar m_delta, typename VectorType::Scalar &par, VectorType &x); } /** * \ingroup NonLinearOptimization_Module * \brief Performs non linear optimization over a non-linear function, * using a variant of the Levenberg Marquardt algorithm. * * Check wikipedia for more information. 
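 *
 * A minimal usage sketch (the functor type, problem sizes and data below are
 * hypothetical and only illustrate the expected interface): derive a functor
 * from DenseFunctor, implement operator() for the residual vector and df()
 * for its Jacobian, then call minimize().
 * \code
 * struct MyFunctor : Eigen::DenseFunctor<double>
 * {
 *   MyFunctor() : DenseFunctor<double>(2, 3) {}     // 2 unknowns, 3 residuals
 *   int operator()(const InputType &x, ValueType &fvec)
 *   {
 *     // residuals of fitting y = a + b*t to three observations
 *     fvec(0) = x(0) + 1.0 * x(1) - 2.0;
 *     fvec(1) = x(0) + 2.0 * x(1) - 3.5;
 *     fvec(2) = x(0) + 3.0 * x(1) - 5.1;
 *     return 0;
 *   }
 *   int df(const InputType &, JacobianType &fjac)
 *   {
 *     fjac(0,0) = 1.0; fjac(0,1) = 1.0;
 *     fjac(1,0) = 1.0; fjac(1,1) = 2.0;
 *     fjac(2,0) = 1.0; fjac(2,1) = 3.0;
 *     return 0;
 *   }
 * };
 *
 * MyFunctor functor;
 * Eigen::LevenbergMarquardt<MyFunctor> lm(functor);
 * Eigen::VectorXd x(2);
 * x << 0.0, 0.0;
 * Eigen::LevenbergMarquardtSpace::Status status = lm.minimize(x);
 * // status/lm.info() report convergence; x holds the fitted (a, b)
 * \endcode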
* http://en.wikipedia.org/wiki/Levenberg%E2%80%93Marquardt_algorithm */ template class LevenbergMarquardt : internal::no_assignment_operator { public: typedef _FunctorType FunctorType; typedef typename FunctorType::QRSolver QRSolver; typedef typename FunctorType::JacobianType JacobianType; typedef typename JacobianType::Scalar Scalar; typedef typename JacobianType::RealScalar RealScalar; typedef typename JacobianType::Index Index; typedef typename QRSolver::Index PermIndex; typedef Matrix FVectorType; typedef PermutationMatrix PermutationType; public: LevenbergMarquardt(FunctorType& functor) : m_functor(functor),m_nfev(0),m_njev(0),m_fnorm(0.0),m_gnorm(0), m_isInitialized(false),m_info(InvalidInput) { resetParameters(); m_useExternalScaling=false; } LevenbergMarquardtSpace::Status minimize(FVectorType &x); LevenbergMarquardtSpace::Status minimizeInit(FVectorType &x); LevenbergMarquardtSpace::Status minimizeOneStep(FVectorType &x); LevenbergMarquardtSpace::Status lmder1( FVectorType &x, const Scalar tol = std::sqrt(NumTraits::epsilon()) ); static LevenbergMarquardtSpace::Status lmdif1( FunctorType &functor, FVectorType &x, Index *nfev, const Scalar tol = std::sqrt(NumTraits::epsilon()) ); /** Sets the default parameters */ void resetParameters() { m_factor = 100.; m_maxfev = 400; m_ftol = std::sqrt(NumTraits::epsilon()); m_xtol = std::sqrt(NumTraits::epsilon()); m_gtol = 0. ; m_epsfcn = 0. ; } /** Sets the tolerance for the norm of the solution vector*/ void setXtol(RealScalar xtol) { m_xtol = xtol; } /** Sets the tolerance for the norm of the vector function*/ void setFtol(RealScalar ftol) { m_ftol = ftol; } /** Sets the tolerance for the norm of the gradient of the error vector*/ void setGtol(RealScalar gtol) { m_gtol = gtol; } /** Sets the step bound for the diagonal shift */ void setFactor(RealScalar factor) { m_factor = factor; } /** Sets the error precision */ void setEpsilon (RealScalar epsfcn) { m_epsfcn = epsfcn; } /** Sets the maximum number of function evaluation */ void setMaxfev(Index maxfev) {m_maxfev = maxfev; } /** Use an external Scaling. If set to true, pass a nonzero diagonal to diag() */ void setExternalScaling(bool value) {m_useExternalScaling = value; } /** \returns a reference to the diagonal of the jacobian */ FVectorType& diag() {return m_diag; } /** \returns the number of iterations performed */ Index iterations() { return m_iter; } /** \returns the number of functions evaluation */ Index nfev() { return m_nfev; } /** \returns the number of jacobian evaluation */ Index njev() { return m_njev; } /** \returns the norm of current vector function */ RealScalar fnorm() {return m_fnorm; } /** \returns the norm of the gradient of the error */ RealScalar gnorm() {return m_gnorm; } /** \returns the LevenbergMarquardt parameter */ RealScalar lm_param(void) { return m_par; } /** \returns a reference to the current vector function */ FVectorType& fvec() {return m_fvec; } /** \returns a reference to the matrix where the current Jacobian matrix is stored */ JacobianType& jacobian() {return m_fjac; } /** \returns a reference to the triangular matrix R from the QR of the jacobian matrix. 
* \sa jacobian() */ JacobianType& matrixR() {return m_rfactor; } /** the permutation used in the QR factorization */ PermutationType permutation() {return m_permutation; } /** * \brief Reports whether the minimization was successful * \returns \c Success if the minimization was succesful, * \c NumericalIssue if a numerical problem arises during the * minimization process, for exemple during the QR factorization * \c NoConvergence if the minimization did not converge after * the maximum number of function evaluation allowed * \c InvalidInput if the input matrix is invalid */ ComputationInfo info() const { return m_info; } private: JacobianType m_fjac; JacobianType m_rfactor; // The triangular matrix R from the QR of the jacobian matrix m_fjac FunctorType &m_functor; FVectorType m_fvec, m_qtf, m_diag; Index n; Index m; Index m_nfev; Index m_njev; RealScalar m_fnorm; // Norm of the current vector function RealScalar m_gnorm; //Norm of the gradient of the error RealScalar m_factor; // Index m_maxfev; // Maximum number of function evaluation RealScalar m_ftol; //Tolerance in the norm of the vector function RealScalar m_xtol; // RealScalar m_gtol; //tolerance of the norm of the error gradient RealScalar m_epsfcn; // Index m_iter; // Number of iterations performed RealScalar m_delta; bool m_useExternalScaling; PermutationType m_permutation; FVectorType m_wa1, m_wa2, m_wa3, m_wa4; //Temporary vectors RealScalar m_par; bool m_isInitialized; // Check whether the minimization step has been called ComputationInfo m_info; }; template LevenbergMarquardtSpace::Status LevenbergMarquardt::minimize(FVectorType &x) { LevenbergMarquardtSpace::Status status = minimizeInit(x); if (status==LevenbergMarquardtSpace::ImproperInputParameters) { m_isInitialized = true; return status; } do { // std::cout << " uv " << x.transpose() << "\n"; status = minimizeOneStep(x); } while (status==LevenbergMarquardtSpace::Running); m_isInitialized = true; return status; } template LevenbergMarquardtSpace::Status LevenbergMarquardt::minimizeInit(FVectorType &x) { n = x.size(); m = m_functor.values(); m_wa1.resize(n); m_wa2.resize(n); m_wa3.resize(n); m_wa4.resize(m); m_fvec.resize(m); //FIXME Sparse Case : Allocate space for the jacobian m_fjac.resize(m, n); // m_fjac.reserve(VectorXi::Constant(n,5)); // FIXME Find a better alternative if (!m_useExternalScaling) m_diag.resize(n); eigen_assert( (!m_useExternalScaling || m_diag.size()==n) || "When m_useExternalScaling is set, the caller must provide a valid 'm_diag'"); m_qtf.resize(n); /* Function Body */ m_nfev = 0; m_njev = 0; /* check the input parameters for errors. */ if (n <= 0 || m < n || m_ftol < 0. || m_xtol < 0. || m_gtol < 0. || m_maxfev <= 0 || m_factor <= 0.){ m_info = InvalidInput; return LevenbergMarquardtSpace::ImproperInputParameters; } if (m_useExternalScaling) for (Index j = 0; j < n; ++j) if (m_diag[j] <= 0.) { m_info = InvalidInput; return LevenbergMarquardtSpace::ImproperInputParameters; } /* evaluate the function at the starting point */ /* and calculate its norm. */ m_nfev = 1; if ( m_functor(x, m_fvec) < 0) return LevenbergMarquardtSpace::UserAsked; m_fnorm = m_fvec.stableNorm(); /* initialize levenberg-marquardt parameter and iteration counter. */ m_par = 0.; m_iter = 1; return LevenbergMarquardtSpace::NotStarted; } template LevenbergMarquardtSpace::Status LevenbergMarquardt::lmder1( FVectorType &x, const Scalar tol ) { n = x.size(); m = m_functor.values(); /* check the input parameters for errors. */ if (n <= 0 || m < n || tol < 0.) 
return LevenbergMarquardtSpace::ImproperInputParameters; resetParameters(); m_ftol = tol; m_xtol = tol; m_maxfev = 100*(n+1); return minimize(x); } template LevenbergMarquardtSpace::Status LevenbergMarquardt::lmdif1( FunctorType &functor, FVectorType &x, Index *nfev, const Scalar tol ) { Index n = x.size(); Index m = functor.values(); /* check the input parameters for errors. */ if (n <= 0 || m < n || tol < 0.) return LevenbergMarquardtSpace::ImproperInputParameters; NumericalDiff numDiff(functor); // embedded LevenbergMarquardt LevenbergMarquardt > lm(numDiff); lm.setFtol(tol); lm.setXtol(tol); lm.setMaxfev(200*(n+1)); LevenbergMarquardtSpace::Status info = LevenbergMarquardtSpace::Status(lm.minimize(x)); if (nfev) * nfev = lm.nfev(); return info; } } // end namespace Eigen #endif // EIGEN_LEVENBERGMARQUARDT_H RcppEigen/inst/include/unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h0000644000176200001440000001520012253717461026464 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Thomas Capricelli // Copyright (C) 2012 Desire Nuentsa // // This code initially comes from MINPACK whose original authors are: // Copyright Jorge More - Argonne National Laboratory // Copyright Burt Garbow - Argonne National Laboratory // Copyright Ken Hillstrom - Argonne National Laboratory // // This Source Code Form is subject to the terms of the Minpack license // (a BSD-like license) described in the campaigned CopyrightMINPACK.txt file. #ifndef EIGEN_LMQRSOLV_H #define EIGEN_LMQRSOLV_H namespace Eigen { namespace internal { template void lmqrsolv( Matrix &s, const PermutationMatrix &iPerm, const Matrix &diag, const Matrix &qtb, Matrix &x, Matrix &sdiag) { /* Local variables */ Index i, j, k, l; Scalar temp; Index n = s.cols(); Matrix wa(n); JacobiRotation givens; /* Function Body */ // the following will only change the lower triangular part of s, including // the diagonal, though the diagonal is restored afterward /* copy r and (q transpose)*b to preserve input and initialize s. */ /* in particular, save the diagonal elements of r in x. */ x = s.diagonal(); wa = qtb; s.topLeftCorner(n,n).template triangularView() = s.topLeftCorner(n,n).transpose(); /* eliminate the diagonal matrix d using a givens rotation. */ for (j = 0; j < n; ++j) { /* prepare the row of d to be eliminated, locating the */ /* diagonal element using p from the qr factorization. */ l = iPerm.indices()(j); if (diag[l] == 0.) break; sdiag.tail(n-j).setZero(); sdiag[j] = diag[l]; /* the transformations to eliminate the row of d */ /* modify only a single element of (q transpose)*b */ /* beyond the first n, which is initially zero. */ Scalar qtbpj = 0.; for (k = j; k < n; ++k) { /* determine a givens rotation which eliminates the */ /* appropriate element in the current row of d. */ givens.makeGivens(-s(k,k), sdiag[k]); /* compute the modified diagonal element of r and */ /* the modified element of ((q transpose)*b,0). */ s(k,k) = givens.c() * s(k,k) + givens.s() * sdiag[k]; temp = givens.c() * wa[k] + givens.s() * qtbpj; qtbpj = -givens.s() * wa[k] + givens.c() * qtbpj; wa[k] = temp; /* accumulate the tranformation in the row of s. */ for (i = k+1; i().solveInPlace(wa.head(nsing)); // restore sdiag = s.diagonal(); s.diagonal() = x; /* permute the components of z back to components of x. 
*/ x = iPerm * wa; } template void lmqrsolv( SparseMatrix &s, const PermutationMatrix &iPerm, const Matrix &diag, const Matrix &qtb, Matrix &x, Matrix &sdiag) { /* Local variables */ typedef SparseMatrix FactorType; Index i, j, k, l; Scalar temp; Index n = s.cols(); Matrix wa(n); JacobiRotation givens; /* Function Body */ // the following will only change the lower triangular part of s, including // the diagonal, though the diagonal is restored afterward /* copy r and (q transpose)*b to preserve input and initialize R. */ wa = qtb; FactorType R(s); // Eliminate the diagonal matrix d using a givens rotation for (j = 0; j < n; ++j) { // Prepare the row of d to be eliminated, locating the // diagonal element using p from the qr factorization l = iPerm.indices()(j); if (diag(l) == Scalar(0)) break; sdiag.tail(n-j).setZero(); sdiag[j] = diag[l]; // the transformations to eliminate the row of d // modify only a single element of (q transpose)*b // beyond the first n, which is initially zero. Scalar qtbpj = 0; // Browse the nonzero elements of row j of the upper triangular s for (k = j; k < n; ++k) { typename FactorType::InnerIterator itk(R,k); for (; itk; ++itk){ if (itk.index() < k) continue; else break; } //At this point, we have the diagonal element R(k,k) // Determine a givens rotation which eliminates // the appropriate element in the current row of d givens.makeGivens(-itk.value(), sdiag(k)); // Compute the modified diagonal element of r and // the modified element of ((q transpose)*b,0). itk.valueRef() = givens.c() * itk.value() + givens.s() * sdiag(k); temp = givens.c() * wa(k) + givens.s() * qtbpj; qtbpj = -givens.s() * wa(k) + givens.c() * qtbpj; wa(k) = temp; // Accumulate the transformation in the remaining k row/column of R for (++itk; itk; ++itk) { i = itk.index(); temp = givens.c() * itk.value() + givens.s() * sdiag(i); sdiag(i) = -givens.s() * itk.value() + givens.c() * sdiag(i); itk.valueRef() = temp; } } } // Solve the triangular system for z. If the system is // singular, then obtain a least squares solution Index nsing; for(nsing = 0; nsing().solve/*InPlace*/(wa.head(nsing)); sdiag = R.diagonal(); // Permute the components of z back to components of x x = iPerm * wa; } } // end namespace internal } // end namespace Eigen #endif // EIGEN_LMQRSOLV_H RcppEigen/inst/include/unsupported/Eigen/src/LevenbergMarquardt/LMpar.h0000644000176200001440000001163712253717461025732 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // This code initially comes from MINPACK whose original authors are: // Copyright Jorge More - Argonne National Laboratory // Copyright Burt Garbow - Argonne National Laboratory // Copyright Ken Hillstrom - Argonne National Laboratory // // This Source Code Form is subject to the terms of the Minpack license // (a BSD-like license) described in the campaigned CopyrightMINPACK.txt file. #ifndef EIGEN_LMPAR_H #define EIGEN_LMPAR_H namespace Eigen { namespace internal { template void lmpar2( const QRSolver &qr, const VectorType &diag, const VectorType &qtb, typename VectorType::Scalar m_delta, typename VectorType::Scalar &par, VectorType &x) { using std::sqrt; using std::abs; typedef typename QRSolver::MatrixType MatrixType; typedef typename QRSolver::Scalar Scalar; typedef typename QRSolver::Index Index; /* Local variables */ Index j; Scalar fp; Scalar parc, parl; Index iter; Scalar temp, paru; Scalar gnorm; Scalar dxnorm; // Make a copy of the triangular factor. 
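  // Roughly speaking, what follows is the MINPACK "lmpar" computation: given
  // the QR factorization of the Jacobian J, the scaling vector diag and
  // qtb = Q^T * b for the current residual b, it finds par >= 0 such that the
  // solution x of the damped least-squares problem
  //   min || J*x - b ||^2 + par * || diag.asDiagonal() * x ||^2
  // has || diag.cwiseProduct(x) || within roughly 10% of m_delta (the current
  // trust-region radius); par stays 0 whenever the plain Gauss-Newton step is
  // already (nearly) inside the trust region.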
// This copy is modified during call the qrsolv MatrixType s; s = qr.matrixR(); /* Function Body */ const Scalar dwarf = (std::numeric_limits::min)(); const Index n = qr.matrixR().cols(); eigen_assert(n==diag.size()); eigen_assert(n==qtb.size()); VectorType wa1, wa2; /* compute and store in x the gauss-newton direction. if the */ /* jacobian is rank-deficient, obtain a least squares solution. */ // const Index rank = qr.nonzeroPivots(); // exactly double(0.) const Index rank = qr.rank(); // use a threshold wa1 = qtb; wa1.tail(n-rank).setZero(); //FIXME There is no solve in place for sparse triangularView wa1.head(rank) = s.topLeftCorner(rank,rank).template triangularView().solve(qtb.head(rank)); x = qr.colsPermutation()*wa1; /* initialize the iteration counter. */ /* evaluate the function at the origin, and test */ /* for acceptance of the gauss-newton direction. */ iter = 0; wa2 = diag.cwiseProduct(x); dxnorm = wa2.blueNorm(); fp = dxnorm - m_delta; if (fp <= Scalar(0.1) * m_delta) { par = 0; return; } /* if the jacobian is not rank deficient, the newton */ /* step provides a lower bound, parl, for the zero of */ /* the function. otherwise set this bound to zero. */ parl = 0.; if (rank==n) { wa1 = qr.colsPermutation().inverse() * diag.cwiseProduct(wa2)/dxnorm; s.topLeftCorner(n,n).transpose().template triangularView().solveInPlace(wa1); temp = wa1.blueNorm(); parl = fp / m_delta / temp / temp; } /* calculate an upper bound, paru, for the zero of the function. */ for (j = 0; j < n; ++j) wa1[j] = s.col(j).head(j+1).dot(qtb.head(j+1)) / diag[qr.colsPermutation().indices()(j)]; gnorm = wa1.stableNorm(); paru = gnorm / m_delta; if (paru == 0.) paru = dwarf / (std::min)(m_delta,Scalar(0.1)); /* if the input par lies outside of the interval (parl,paru), */ /* set par to the closer endpoint. */ par = (std::max)(par,parl); par = (std::min)(par,paru); if (par == 0.) par = gnorm / dxnorm; /* beginning of an iteration. */ while (true) { ++iter; /* evaluate the function at the current value of par. */ if (par == 0.) par = (std::max)(dwarf,Scalar(.001) * paru); /* Computing MAX */ wa1 = sqrt(par)* diag; VectorType sdiag(n); lmqrsolv(s, qr.colsPermutation(), wa1, qtb, x, sdiag); wa2 = diag.cwiseProduct(x); dxnorm = wa2.blueNorm(); temp = fp; fp = dxnorm - m_delta; /* if the function is small enough, accept the current value */ /* of par. also test for the exceptional cases where parl */ /* is zero or the number of iterations has reached 10. */ if (abs(fp) <= Scalar(0.1) * m_delta || (parl == 0. && fp <= temp && temp < 0.) || iter == 10) break; /* compute the newton correction. */ wa1 = qr.colsPermutation().inverse() * diag.cwiseProduct(wa2/dxnorm); // we could almost use this here, but the diagonal is outside qr, in sdiag[] for (j = 0; j < n; ++j) { wa1[j] /= sdiag[j]; temp = wa1[j]; for (Index i = j+1; i < n; ++i) wa1[i] -= s.coeff(i,j) * temp; } temp = wa1.blueNorm(); parc = fp / m_delta / temp / temp; /* depending on the sign of the function, update parl or paru. */ if (fp > 0.) parl = (std::max)(parl,par); if (fp < 0.) paru = (std::min)(paru,par); /* compute an improved estimate for par. */ par = (std::max)(parl,par+parc); } if (iter == 0) par = 0.; return; } } // end namespace internal } // end namespace Eigen #endif // EIGEN_LMPAR_H RcppEigen/inst/include/unsupported/Eigen/src/LevenbergMarquardt/CopyrightMINPACK.txt0000644000176200001440000000422212253717461030252 0ustar liggesusersMinpack Copyright Notice (1999) University of Chicago. 
All rights reserved Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The end-user documentation included with the redistribution, if any, must include the following acknowledgment: "This product includes software developed by the University of Chicago, as Operator of Argonne National Laboratory. Alternately, this acknowledgment may appear in the software itself, if and wherever such third-party acknowledgments normally appear. 4. WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS" WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4) DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL BE CORRECTED. 5. LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT, INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE, EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE POSSIBILITY OF SUCH LOSS OR DAMAGES. RcppEigen/inst/include/unsupported/Eigen/src/LevenbergMarquardt/LMcovar.h0000644000176200001440000000465112253717461026260 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // This code initially comes from MINPACK whose original authors are: // Copyright Jorge More - Argonne National Laboratory // Copyright Burt Garbow - Argonne National Laboratory // Copyright Ken Hillstrom - Argonne National Laboratory // // This Source Code Form is subject to the terms of the Minpack license // (a BSD-like license) described in the campaigned CopyrightMINPACK.txt file. #ifndef EIGEN_LMCOVAR_H #define EIGEN_LMCOVAR_H namespace Eigen { namespace internal { template void covar( Matrix< Scalar, Dynamic, Dynamic > &r, const VectorXi& ipvt, Scalar tol = std::sqrt(NumTraits::epsilon()) ) { using std::abs; typedef DenseIndex Index; /* Local variables */ Index i, j, k, l, ii, jj; bool sing; Scalar temp; /* Function Body */ const Index n = r.cols(); const Scalar tolr = tol * abs(r(0,0)); Matrix< Scalar, Dynamic, 1 > wa(n); eigen_assert(ipvt.size()==n); /* form the inverse of r in the full upper triangle of r. */ l = -1; for (k = 0; k < n; ++k) if (abs(r(k,k)) > tolr) { r(k,k) = 1. 
/ r(k,k); for (j = 0; j <= k-1; ++j) { temp = r(k,k) * r(j,k); r(j,k) = 0.; r.col(k).head(j+1) -= r.col(j).head(j+1) * temp; } l = k; } /* form the full upper triangle of the inverse of (r transpose)*r */ /* in the full upper triangle of r. */ for (k = 0; k <= l; ++k) { for (j = 0; j <= k-1; ++j) r.col(j).head(j+1) += r.col(k).head(j+1) * r(j,k); r.col(k).head(k+1) *= r(k,k); } /* form the full lower triangle of the covariance matrix */ /* in the strict lower triangle of r and in wa. */ for (j = 0; j < n; ++j) { jj = ipvt[j]; sing = j > l; for (i = 0; i <= j; ++i) { if (sing) r(i,j) = 0.; ii = ipvt[i]; if (ii > jj) r(ii,jj) = r(i,j); if (ii < jj) r(jj,ii) = r(i,j); } wa[jj] = r(j,j); } /* symmetrize the covariance matrix in r. */ r.topLeftCorner(n,n).template triangularView() = r.topLeftCorner(n,n).transpose(); r.diagonal() = wa; } } // end namespace internal } // end namespace Eigen #endif // EIGEN_LMCOVAR_H RcppEigen/inst/include/unsupported/Eigen/src/LevenbergMarquardt/LMonestep.h0000644000176200001440000001477012253717461026626 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Thomas Capricelli // // This code initially comes from MINPACK whose original authors are: // Copyright Jorge More - Argonne National Laboratory // Copyright Burt Garbow - Argonne National Laboratory // Copyright Ken Hillstrom - Argonne National Laboratory // // This Source Code Form is subject to the terms of the Minpack license // (a BSD-like license) described in the campaigned CopyrightMINPACK.txt file. #ifndef EIGEN_LMONESTEP_H #define EIGEN_LMONESTEP_H namespace Eigen { template LevenbergMarquardtSpace::Status LevenbergMarquardt::minimizeOneStep(FVectorType &x) { using std::abs; using std::sqrt; RealScalar temp, temp1,temp2; RealScalar ratio; RealScalar pnorm, xnorm, fnorm1, actred, dirder, prered; eigen_assert(x.size()==n); // check the caller is not cheating us temp = 0.0; xnorm = 0.0; /* calculate the jacobian matrix. */ Index df_ret = m_functor.df(x, m_fjac); if (df_ret<0) return LevenbergMarquardtSpace::UserAsked; if (df_ret>0) // numerical diff, we evaluated the function df_ret times m_nfev += df_ret; else m_njev++; /* compute the qr factorization of the jacobian. */ for (int j = 0; j < x.size(); ++j) m_wa2(j) = m_fjac.col(j).blueNorm(); QRSolver qrfac(m_fjac); if(qrfac.info() != Success) { m_info = NumericalIssue; return LevenbergMarquardtSpace::ImproperInputParameters; } // Make a copy of the first factor with the associated permutation m_rfactor = qrfac.matrixR(); m_permutation = (qrfac.colsPermutation()); /* on the first iteration and if external scaling is not used, scale according */ /* to the norms of the columns of the initial jacobian. */ if (m_iter == 1) { if (!m_useExternalScaling) for (Index j = 0; j < n; ++j) m_diag[j] = (m_wa2[j]==0.)? 1. : m_wa2[j]; /* on the first iteration, calculate the norm of the scaled x */ /* and initialize the step bound m_delta. */ xnorm = m_diag.cwiseProduct(x).stableNorm(); m_delta = m_factor * xnorm; if (m_delta == 0.) m_delta = m_factor; } /* form (q transpose)*m_fvec and store the first n components in */ /* m_qtf. */ m_wa4 = m_fvec; m_wa4 = qrfac.matrixQ().adjoint() * m_fvec; m_qtf = m_wa4.head(n); /* compute the norm of the scaled gradient. */ m_gnorm = 0.; if (m_fnorm != 0.) for (Index j = 0; j < n; ++j) if (m_wa2[m_permutation.indices()[j]] != 0.) 
m_gnorm = (std::max)(m_gnorm, abs( m_rfactor.col(j).head(j+1).dot(m_qtf.head(j+1)/m_fnorm) / m_wa2[m_permutation.indices()[j]])); /* test for convergence of the gradient norm. */ if (m_gnorm <= m_gtol) { m_info = Success; return LevenbergMarquardtSpace::CosinusTooSmall; } /* rescale if necessary. */ if (!m_useExternalScaling) m_diag = m_diag.cwiseMax(m_wa2); do { /* determine the levenberg-marquardt parameter. */ internal::lmpar2(qrfac, m_diag, m_qtf, m_delta, m_par, m_wa1); /* store the direction p and x + p. calculate the norm of p. */ m_wa1 = -m_wa1; m_wa2 = x + m_wa1; pnorm = m_diag.cwiseProduct(m_wa1).stableNorm(); /* on the first iteration, adjust the initial step bound. */ if (m_iter == 1) m_delta = (std::min)(m_delta,pnorm); /* evaluate the function at x + p and calculate its norm. */ if ( m_functor(m_wa2, m_wa4) < 0) return LevenbergMarquardtSpace::UserAsked; ++m_nfev; fnorm1 = m_wa4.stableNorm(); /* compute the scaled actual reduction. */ actred = -1.; if (Scalar(.1) * fnorm1 < m_fnorm) actred = 1. - numext::abs2(fnorm1 / m_fnorm); /* compute the scaled predicted reduction and */ /* the scaled directional derivative. */ m_wa3 = m_rfactor.template triangularView() * (m_permutation.inverse() *m_wa1); temp1 = numext::abs2(m_wa3.stableNorm() / m_fnorm); temp2 = numext::abs2(sqrt(m_par) * pnorm / m_fnorm); prered = temp1 + temp2 / Scalar(.5); dirder = -(temp1 + temp2); /* compute the ratio of the actual to the predicted */ /* reduction. */ ratio = 0.; if (prered != 0.) ratio = actred / prered; /* update the step bound. */ if (ratio <= Scalar(.25)) { if (actred >= 0.) temp = RealScalar(.5); if (actred < 0.) temp = RealScalar(.5) * dirder / (dirder + RealScalar(.5) * actred); if (RealScalar(.1) * fnorm1 >= m_fnorm || temp < RealScalar(.1)) temp = Scalar(.1); /* Computing MIN */ m_delta = temp * (std::min)(m_delta, pnorm / RealScalar(.1)); m_par /= temp; } else if (!(m_par != 0. && ratio < RealScalar(.75))) { m_delta = pnorm / RealScalar(.5); m_par = RealScalar(.5) * m_par; } /* test for successful iteration. */ if (ratio >= RealScalar(1e-4)) { /* successful iteration. update x, m_fvec, and their norms. */ x = m_wa2; m_wa2 = m_diag.cwiseProduct(x); m_fvec = m_wa4; xnorm = m_wa2.stableNorm(); m_fnorm = fnorm1; ++m_iter; } /* tests for convergence. */ if (abs(actred) <= m_ftol && prered <= m_ftol && Scalar(.5) * ratio <= 1. && m_delta <= m_xtol * xnorm) { m_info = Success; return LevenbergMarquardtSpace::RelativeErrorAndReductionTooSmall; } if (abs(actred) <= m_ftol && prered <= m_ftol && Scalar(.5) * ratio <= 1.) { m_info = Success; return LevenbergMarquardtSpace::RelativeReductionTooSmall; } if (m_delta <= m_xtol * xnorm) { m_info = Success; return LevenbergMarquardtSpace::RelativeErrorTooSmall; } /* tests for termination and stringent tolerances. */ if (m_nfev >= m_maxfev) { m_info = NoConvergence; return LevenbergMarquardtSpace::TooManyFunctionEvaluation; } if (abs(actred) <= NumTraits::epsilon() && prered <= NumTraits::epsilon() && Scalar(.5) * ratio <= 1.) 
{ m_info = Success; return LevenbergMarquardtSpace::FtolTooSmall; } if (m_delta <= NumTraits::epsilon() * xnorm) { m_info = Success; return LevenbergMarquardtSpace::XtolTooSmall; } if (m_gnorm <= NumTraits::epsilon()) { m_info = Success; return LevenbergMarquardtSpace::GtolTooSmall; } } while (ratio < Scalar(1e-4)); return LevenbergMarquardtSpace::Running; } } // end namespace Eigen #endif // EIGEN_LMONESTEP_H RcppEigen/inst/include/unsupported/Eigen/src/IterativeSolvers/0000755000176200001440000000000012647570720024260 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h0000644000176200001440000001240312305236565030307 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud /* NOTE The functions of this file have been adapted from the GMM++ library */ //======================================================================== // // Copyright (C) 2002-2007 Yves Renard // // This file is a part of GETFEM++ // // Getfem++ is free software; you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as // published by the Free Software Foundation; version 2.1 of the License. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // You should have received a copy of the GNU Lesser General Public // License along with this program; if not, write to the Free Software // Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, // USA. // //======================================================================== #include "../../../../Eigen/src/Core/util/NonMPL2.h" #ifndef EIGEN_CONSTRAINEDCG_H #define EIGEN_CONSTRAINEDCG_H #include namespace Eigen { namespace internal { /** \ingroup IterativeSolvers_Module * Compute the pseudo inverse of the non-square matrix C such that * \f$ CINV = (C * C^T)^{-1} * C \f$ based on a conjugate gradient method. * * This function is internally used by constrained_cg. */ template void pseudo_inverse(const CMatrix &C, CINVMatrix &CINV) { // optimisable : copie de la ligne, precalcul de C * trans(C). typedef typename CMatrix::Scalar Scalar; typedef typename CMatrix::Index Index; // FIXME use sparse vectors ? 
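  // Outline of the computation below: for each row index i, take d equal to
  // the i-th canonical basis vector and solve (C * C^T) * e = d with a plain
  // (unpreconditioned) conjugate gradient iteration; the i-th row of CINV is
  // then l = C^T * e, i.e. the i-th row of (C * C^T)^{-1} * C, and its entries
  // are collected as triplets to assemble the sparse result.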
typedef Matrix TmpVec; Index rows = C.rows(), cols = C.cols(); TmpVec d(rows), e(rows), l(cols), p(rows), q(rows), r(rows); Scalar rho, rho_1, alpha; d.setZero(); typedef Triplet T; std::vector tripletList; for (Index i = 0; i < rows; ++i) { d[i] = 1.0; rho = 1.0; e.setZero(); r = d; p = d; while (rho >= 1e-38) { /* conjugate gradient to compute e */ /* which is the i-th row of inv(C * trans(C)) */ l = C.transpose() * p; q = C * l; alpha = rho / p.dot(q); e += alpha * p; r += -alpha * q; rho_1 = rho; rho = r.dot(r); p = (rho/rho_1) * p + r; } l = C.transpose() * e; // l is the i-th row of CINV // FIXME add a generic "prune/filter" expression for both dense and sparse object to sparse for (Index j=0; j void constrained_cg(const TMatrix& A, const CMatrix& C, VectorX& x, const VectorB& b, const VectorF& f, IterationController &iter) { using std::sqrt; typedef typename TMatrix::Scalar Scalar; typedef typename TMatrix::Index Index; typedef Matrix TmpVec; Scalar rho = 1.0, rho_1, lambda, gamma; Index xSize = x.size(); TmpVec p(xSize), q(xSize), q2(xSize), r(xSize), old_z(xSize), z(xSize), memox(xSize); std::vector satured(C.rows()); p.setZero(); iter.setRhsNorm(sqrt(b.dot(b))); // gael vect_sp(PS, b, b) if (iter.rhsNorm() == 0.0) iter.setRhsNorm(1.0); SparseMatrix CINV(C.rows(), C.cols()); pseudo_inverse(C, CINV); while(true) { // computation of residual old_z = z; memox = x; r = b; r += A * -x; z = r; bool transition = false; for (Index i = 0; i < C.rows(); ++i) { Scalar al = C.row(i).dot(x) - f.coeff(i); if (al >= -1.0E-15) { if (!satured[i]) { satured[i] = true; transition = true; } Scalar bb = CINV.row(i).dot(z); if (bb > 0.0) // FIXME: we should allow that: z += -bb * C.row(i); for (typename CMatrix::InnerIterator it(C,i); it; ++it) z.coeffRef(it.index()) -= bb*it.value(); } else satured[i] = false; } // descent direction rho_1 = rho; rho = r.dot(z); if (iter.finished(rho)) break; if (iter.noiseLevel() > 0 && transition) std::cerr << "CCG: transition\n"; if (transition || iter.first()) gamma = 0.0; else gamma = (std::max)(0.0, (rho - old_z.dot(z)) / rho_1); p = z + gamma*p; ++iter; // one dimensionnal optimization q = A * p; lambda = rho / q.dot(p); for (Index i = 0; i < C.rows(); ++i) { if (!satured[i]) { Scalar bb = C.row(i).dot(p) - f[i]; if (bb > 0.0) lambda = (std::min)(lambda, (f.coeff(i)-C.row(i).dot(x)) / bb); } } x += lambda * p; memox -= x; } } } // end namespace internal } // end namespace Eigen #endif // EIGEN_CONSTRAINEDCG_H RcppEigen/inst/include/unsupported/Eigen/src/IterativeSolvers/IterationController.h0000644000176200001440000001235212253717461030434 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud /* NOTE The class IterationController has been adapted from the iteration * class of the GMM++ and ITL libraries. */ //======================================================================= // Copyright (C) 1997-2001 // Authors: Andrew Lumsdaine // Lie-Quan Lee // // This file is part of the Iterative Template Library // // You should have received a copy of the License Agreement for the // Iterative Template Library along with the software; see the // file LICENSE. 
// // Permission to modify the code and to distribute modified code is // granted, provided the text of this NOTICE is retained, a notice that // the code was modified is included with the above COPYRIGHT NOTICE and // with the COPYRIGHT NOTICE in the LICENSE file, and that the LICENSE // file is distributed with the modified code. // // LICENSOR MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. // By way of example, but not limitation, Licensor MAKES NO // REPRESENTATIONS OR WARRANTIES OF MERCHANTABILITY OR FITNESS FOR ANY // PARTICULAR PURPOSE OR THAT THE USE OF THE LICENSED SOFTWARE COMPONENTS // OR DOCUMENTATION WILL NOT INFRINGE ANY PATENTS, COPYRIGHTS, TRADEMARKS // OR OTHER RIGHTS. //======================================================================= //======================================================================== // // Copyright (C) 2002-2007 Yves Renard // // This file is a part of GETFEM++ // // Getfem++ is free software; you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as // published by the Free Software Foundation; version 2.1 of the License. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // You should have received a copy of the GNU Lesser General Public // License along with this program; if not, write to the Free Software // Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, // USA. // //======================================================================== #include "../../../../Eigen/src/Core/util/NonMPL2.h" #ifndef EIGEN_ITERATION_CONTROLLER_H #define EIGEN_ITERATION_CONTROLLER_H namespace Eigen { /** \ingroup IterativeSolvers_Module * \class IterationController * * \brief Controls the iterations of the iterative solvers * * This class has been adapted from the iteration class of GMM++ and ITL libraries. * */ class IterationController { protected : double m_rhsn; ///< Right hand side norm size_t m_maxiter; ///< Max. 
number of iterations int m_noise; ///< if noise > 0 iterations are printed double m_resmax; ///< maximum residual double m_resminreach, m_resadd; size_t m_nit; ///< iteration number double m_res; ///< last computed residual bool m_written; void (*m_callback)(const IterationController&); public : void init() { m_nit = 0; m_res = 0.0; m_written = false; m_resminreach = 1E50; m_resadd = 0.0; m_callback = 0; } IterationController(double r = 1.0E-8, int noi = 0, size_t mit = size_t(-1)) : m_rhsn(1.0), m_maxiter(mit), m_noise(noi), m_resmax(r) { init(); } void operator ++(int) { m_nit++; m_written = false; m_resadd += m_res; } void operator ++() { (*this)++; } bool first() { return m_nit == 0; } /* get/set the "noisyness" (verbosity) of the solvers */ int noiseLevel() const { return m_noise; } void setNoiseLevel(int n) { m_noise = n; } void reduceNoiseLevel() { if (m_noise > 0) m_noise--; } double maxResidual() const { return m_resmax; } void setMaxResidual(double r) { m_resmax = r; } double residual() const { return m_res; } /* change the user-definable callback, called after each iteration */ void setCallback(void (*t)(const IterationController&)) { m_callback = t; } size_t iteration() const { return m_nit; } void setIteration(size_t i) { m_nit = i; } size_t maxIterarions() const { return m_maxiter; } void setMaxIterations(size_t i) { m_maxiter = i; } double rhsNorm() const { return m_rhsn; } void setRhsNorm(double r) { m_rhsn = r; } bool converged() const { return m_res <= m_rhsn * m_resmax; } bool converged(double nr) { using std::abs; m_res = abs(nr); m_resminreach = (std::min)(m_resminreach, m_res); return converged(); } template bool converged(const VectorType &v) { return converged(v.squaredNorm()); } bool finished(double nr) { if (m_callback) m_callback(*this); if (m_noise > 0 && !m_written) { converged(nr); m_written = true; } return (m_nit >= m_maxiter || converged(nr)); } template bool finished(const MatrixBase &v) { return finished(double(v.squaredNorm())); } }; } // end namespace Eigen #endif // EIGEN_ITERATION_CONTROLLER_H RcppEigen/inst/include/unsupported/Eigen/src/IterativeSolvers/GMRES.h0000644000176200001440000003010712647570720025307 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2011 Gael Guennebaud // Copyright (C) 2012, 2014 Kolja Brix // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_GMRES_H #define EIGEN_GMRES_H namespace Eigen { namespace internal { /** * Generalized Minimal Residual Algorithm based on the * Arnoldi algorithm implemented with Householder reflections. * * Parameters: * \param mat matrix of linear system of equations * \param Rhs right hand side vector of linear system of equations * \param x on input: initial guess, on output: solution * \param precond preconditioner used * \param iters on input: maximum number of iterations to perform * on output: number of iterations performed * \param restart number of iterations for a restart * \param tol_error on input: residual tolerance * on output: residuum achieved * * \sa IterativeMethods::bicgstab() * * * For references, please see: * * Saad, Y. and Schultz, M. H. * GMRES: A Generalized Minimal Residual Algorithm for Solving Nonsymmetric Linear Systems. * SIAM J.Sci.Stat.Comp. 7, 1986, pp. 856 - 869. * * Saad, Y. * Iterative Methods for Sparse Linear Systems. 
* Society for Industrial and Applied Mathematics, Philadelphia, 2003. * * Walker, H. F. * Implementations of the GMRES method. * Comput.Phys.Comm. 53, 1989, pp. 311 - 320. * * Walker, H. F. * Implementation of the GMRES Method using Householder Transformations. * SIAM J.Sci.Stat.Comp. 9, 1988, pp. 152 - 163. * */ template bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Preconditioner & precond, int &iters, const int &restart, typename Dest::RealScalar & tol_error) { using std::sqrt; using std::abs; typedef typename Dest::RealScalar RealScalar; typedef typename Dest::Scalar Scalar; typedef Matrix < Scalar, Dynamic, 1 > VectorType; typedef Matrix < Scalar, Dynamic, Dynamic > FMatrixType; RealScalar tol = tol_error; const int maxIters = iters; iters = 0; const int m = mat.rows(); VectorType p0 = rhs - mat*x; VectorType r0 = precond.solve(p0); // is initial guess already good enough? if(abs(r0.norm()) < tol) { return true; } VectorType w = VectorType::Zero(restart + 1); FMatrixType H = FMatrixType::Zero(m, restart + 1); // Hessenberg matrix VectorType tau = VectorType::Zero(restart + 1); std::vector < JacobiRotation < Scalar > > G(restart); // generate first Householder vector VectorType e(m-1); RealScalar beta; r0.makeHouseholder(e, tau.coeffRef(0), beta); w(0)=(Scalar) beta; H.bottomLeftCorner(m - 1, 1) = e; for (int k = 1; k <= restart; ++k) { ++iters; VectorType v = VectorType::Unit(m, k - 1), workspace(m); // apply Householder reflections H_{1} ... H_{k-1} to v for (int i = k - 1; i >= 0; --i) { v.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data()); } // apply matrix M to v: v = mat * v; VectorType t=mat*v; v=precond.solve(t); // apply Householder reflections H_{k-1} ... H_{1} to v for (int i = 0; i < k; ++i) { v.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data()); } if (v.tail(m - k).norm() != 0.0) { if (k <= restart) { // generate new Householder vector VectorType e(m - k - 1); RealScalar beta; v.tail(m - k).makeHouseholder(e, tau.coeffRef(k), beta); H.col(k).tail(m - k - 1) = e; // apply Householder reflection H_{k} to v v.tail(m - k).applyHouseholderOnTheLeft(H.col(k).tail(m - k - 1), tau.coeffRef(k), workspace.data()); } } if (k > 1) { for (int i = 0; i < k - 1; ++i) { // apply old Givens rotations to v v.applyOnTheLeft(i, i + 1, G[i].adjoint()); } } if (k ().solveInPlace(y); // use Horner-like scheme to calculate solution vector VectorType x_new = y(k - 1) * VectorType::Unit(m, k - 1); // apply Householder reflection H_{k} to x_new x_new.tail(m - k + 1).applyHouseholderOnTheLeft(H.col(k - 1).tail(m - k), tau.coeffRef(k - 1), workspace.data()); for (int i = k - 2; i >= 0; --i) { x_new += y(i) * VectorType::Unit(m, i); // apply Householder reflection H_{i} to x_new x_new.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data()); } x += x_new; if (stop) { return true; } else { k=0; // reset data for a restart r0 = rhs - mat * x; VectorType p0=mat*x; VectorType p1=precond.solve(p0); r0 = rhs - p1; // r0_sqnorm = r0.squaredNorm(); w = VectorType::Zero(restart + 1); H = FMatrixType::Zero(m, restart + 1); tau = VectorType::Zero(restart + 1); // generate first Householder vector RealScalar beta; r0.makeHouseholder(e, tau.coeffRef(0), beta); w(0)=(Scalar) beta; H.bottomLeftCorner(m - 1, 1) = e; } } } return false; } } template< typename _MatrixType, typename _Preconditioner = DiagonalPreconditioner > class GMRES; namespace internal { 
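/* A short sketch of the restart and warm-start controls of the GMRES solver
 * declared just above (and defined further below). The matrix A, right-hand
 * side b and initial guess x0 are assumed to be set up elsewhere; the values
 * are illustrative only.
 * \code
 * Eigen::GMRES<Eigen::SparseMatrix<double> > solver;
 * solver.set_restart(50);        // rebuild the Krylov basis every 50 iterations
 * solver.setTolerance(1e-8);
 * solver.setMaxIterations(500);
 * solver.compute(A);             // A: square SparseMatrix<double>
 * Eigen::VectorXd x = solver.solveWithGuess(b, x0);
 * \endcode
 */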
template< typename _MatrixType, typename _Preconditioner> struct traits > { typedef _MatrixType MatrixType; typedef _Preconditioner Preconditioner; }; } /** \ingroup IterativeLinearSolvers_Module * \brief A GMRES solver for sparse square problems * * This class allows to solve for A.x = b sparse linear problems using a generalized minimal * residual method. The vectors x and b can be either dense or sparse. * * \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix. * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner * * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations() * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations * and NumTraits::epsilon() for the tolerance. * * This class can be used as the direct solver classes. Here is a typical usage example: * \code * int n = 10000; * VectorXd x(n), b(n); * SparseMatrix A(n,n); * // fill A and b * GMRES > solver(A); * x = solver.solve(b); * std::cout << "#iterations: " << solver.iterations() << std::endl; * std::cout << "estimated error: " << solver.error() << std::endl; * // update b, and solve again * x = solver.solve(b); * \endcode * * By default the iterations start with x=0 as an initial guess of the solution. * One can control the start using the solveWithGuess() method. * * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner */ template< typename _MatrixType, typename _Preconditioner> class GMRES : public IterativeSolverBase > { typedef IterativeSolverBase Base; using Base::mp_matrix; using Base::m_error; using Base::m_iterations; using Base::m_info; using Base::m_isInitialized; private: int m_restart; public: typedef _MatrixType MatrixType; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Index Index; typedef typename MatrixType::RealScalar RealScalar; typedef _Preconditioner Preconditioner; public: /** Default constructor. */ GMRES() : Base(), m_restart(30) {} /** Initialize the solver with matrix \a A for further \c Ax=b solving. * * This constructor is a shortcut for the default constructor followed * by a call to compute(). * * \warning this class stores a reference to the matrix A as well as some * precomputed values that depend on it. Therefore, if \a A is changed * this class becomes invalid. Call compute() to update it with the new * matrix A, or modify a copy of A. */ template explicit GMRES(const EigenBase& A) : Base(A.derived()), m_restart(30) {} ~GMRES() {} /** Get the number of iterations after that a restart is performed. */ int get_restart() { return m_restart; } /** Set the number of iterations after that a restart is performed. * \param restart number of iterations for a restarti, default is 30. */ void set_restart(const int restart) { m_restart=restart; } /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A * \a x0 as an initial solution. 
* * \sa compute() */ template inline const internal::solve_retval_with_guess solveWithGuess(const MatrixBase& b, const Guess& x0) const { eigen_assert(m_isInitialized && "GMRES is not initialized."); eigen_assert(Base::rows()==b.rows() && "GMRES::solve(): invalid number of rows of the right hand side matrix b"); return internal::solve_retval_with_guess (*this, b.derived(), x0); } /** \internal */ template void _solveWithGuess(const Rhs& b, Dest& x) const { bool failed = false; for(int j=0; j void _solve(const Rhs& b, Dest& x) const { x = b; if(x.squaredNorm() == 0) return; // Check Zero right hand side _solveWithGuess(b,x); } protected: }; namespace internal { template struct solve_retval, Rhs> : solve_retval_base, Rhs> { typedef GMRES<_MatrixType, _Preconditioner> Dec; EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) template void evalTo(Dest& dst) const { dec()._solve(rhs(),dst); } }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_GMRES_H RcppEigen/inst/include/unsupported/Eigen/src/IterativeSolvers/Scaling.h0000644000176200001440000001314412253717461026012 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Desire NUENTSA WAKAM A; * // fill A and b; * IterScaling > scal; * // Compute the left and right scaling vectors. The matrix is equilibrated at output * scal.computeRef(A); * // Scale the right hand side * b = scal.LeftScaling().cwiseProduct(b); * // Now, solve the equilibrated linear system with any available solver * * // Scale back the computed solution * x = scal.RightScaling().cwiseProduct(x); * \endcode * * \tparam _MatrixType the type of the matrix. It should be a real square sparsematrix * * References : D. Ruiz and B. Ucar, A Symmetry Preserving Algorithm for Matrix Scaling, INRIA Research report RR-7552 * * \sa \ref IncompleteLUT */ namespace Eigen { using std::abs; template class IterScaling { public: typedef _MatrixType MatrixType; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Index Index; public: IterScaling() { init(); } IterScaling(const MatrixType& matrix) { init(); compute(matrix); } ~IterScaling() { } /** * Compute the left and right diagonal matrices to scale the input matrix @p mat * * FIXME This algorithm will be modified such that the diagonal elements are permuted on the diagonal. 
* * \sa LeftScaling() RightScaling() */ void compute (const MatrixType& mat) { int m = mat.rows(); int n = mat.cols(); eigen_assert((m>0 && m == n) && "Please give a non - empty matrix"); m_left.resize(m); m_right.resize(n); m_left.setOnes(); m_right.setOnes(); m_matrix = mat; VectorXd Dr, Dc, DrRes, DcRes; // Temporary Left and right scaling vectors Dr.resize(m); Dc.resize(n); DrRes.resize(m); DcRes.resize(n); double EpsRow = 1.0, EpsCol = 1.0; int its = 0; do { // Iterate until the infinite norm of each row and column is approximately 1 // Get the maximum value in each row and column Dr.setZero(); Dc.setZero(); for (int k=0; km_tol || EpsCol > m_tol) && (its < m_maxits) ); m_isInitialized = true; } /** Compute the left and right vectors to scale the vectors * the input matrix is scaled with the computed vectors at output * * \sa compute() */ void computeRef (MatrixType& mat) { compute (mat); mat = m_matrix; } /** Get the vector to scale the rows of the matrix */ VectorXd& LeftScaling() { return m_left; } /** Get the vector to scale the columns of the matrix */ VectorXd& RightScaling() { return m_right; } /** Set the tolerance for the convergence of the iterative scaling algorithm */ void setTolerance(double tol) { m_tol = tol; } protected: void init() { m_tol = 1e-10; m_maxits = 5; m_isInitialized = false; } MatrixType m_matrix; mutable ComputationInfo m_info; bool m_isInitialized; VectorXd m_left; // Left scaling vector VectorXd m_right; // m_right scaling vector double m_tol; int m_maxits; // Maximum number of iterations allowed }; } #endif RcppEigen/inst/include/unsupported/Eigen/src/IterativeSolvers/DGMRES.h0000644000176200001440000004433712647570720025425 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Désiré Nuentsa-Wakam // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_DGMRES_H #define EIGEN_DGMRES_H #include namespace Eigen { template< typename _MatrixType, typename _Preconditioner = DiagonalPreconditioner > class DGMRES; namespace internal { template< typename _MatrixType, typename _Preconditioner> struct traits > { typedef _MatrixType MatrixType; typedef _Preconditioner Preconditioner; }; /** \brief Computes a permutation vector to have a sorted sequence * \param vec The vector to reorder. * \param perm gives the sorted sequence on output. Must be initialized with 0..n-1 * \param ncut Put the ncut smallest elements at the end of the vector * WARNING This is an expensive sort, so should be used only * for small size vectors * TODO Use modified QuickSplit or std::nth_element to get the smallest values */ template void sortWithPermutation (VectorType& vec, IndexType& perm, typename IndexType::Scalar& ncut) { eigen_assert(vec.size() == perm.size()); typedef typename IndexType::Scalar Index; typedef typename VectorType::Scalar Scalar; bool flag; for (Index k = 0; k < ncut; k++) { flag = false; for (Index j = 0; j < vec.size()-1; j++) { if ( vec(perm(j)) < vec(perm(j+1)) ) { std::swap(perm(j),perm(j+1)); flag = true; } if (!flag) break; // The vector is in sorted order } } } } /** * \ingroup IterativeLInearSolvers_Module * \brief A Restarted GMRES with deflation. * This class implements a modification of the GMRES solver for * sparse linear systems. The basis is built with modified * Gram-Schmidt. 
At each restart, a few approximated eigenvectors * corresponding to the smallest eigenvalues are used to build a * preconditioner for the next cycle. This preconditioner * for deflation can be combined with any other preconditioner, * the IncompleteLUT for instance. The preconditioner is applied * at right of the matrix and the combination is multiplicative. * * \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix. * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner * Typical usage : * \code * SparseMatrix A; * VectorXd x, b; * //Fill A and b ... * DGMRES > solver; * solver.set_restart(30); // Set restarting value * solver.setEigenv(1); // Set the number of eigenvalues to deflate * solver.compute(A); * x = solver.solve(b); * \endcode * * References : * [1] D. NUENTSA WAKAM and F. PACULL, Memory Efficient Hybrid * Algebraic Solvers for Linear Systems Arising from Compressible * Flows, Computers and Fluids, In Press, * http://dx.doi.org/10.1016/j.compfluid.2012.03.023 * [2] K. Burrage and J. Erhel, On the performance of various * adaptive preconditioned GMRES strategies, 5(1998), 101-121. * [3] J. Erhel, K. Burrage and B. Pohl, Restarted GMRES * preconditioned by deflation,J. Computational and Applied * Mathematics, 69(1996), 303-318. * */ template< typename _MatrixType, typename _Preconditioner> class DGMRES : public IterativeSolverBase > { typedef IterativeSolverBase Base; using Base::mp_matrix; using Base::m_error; using Base::m_iterations; using Base::m_info; using Base::m_isInitialized; using Base::m_tolerance; public: typedef _MatrixType MatrixType; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Index Index; typedef typename MatrixType::RealScalar RealScalar; typedef _Preconditioner Preconditioner; typedef Matrix DenseMatrix; typedef Matrix DenseRealMatrix; typedef Matrix DenseVector; typedef Matrix DenseRealVector; typedef Matrix, Dynamic, 1> ComplexVector; /** Default constructor. */ DGMRES() : Base(),m_restart(30),m_neig(0),m_r(0),m_maxNeig(5),m_isDeflAllocated(false),m_isDeflInitialized(false) {} /** Initialize the solver with matrix \a A for further \c Ax=b solving. * * This constructor is a shortcut for the default constructor followed * by a call to compute(). * * \warning this class stores a reference to the matrix A as well as some * precomputed values that depend on it. Therefore, if \a A is changed * this class becomes invalid. Call compute() to update it with the new * matrix A, or modify a copy of A. */ template explicit DGMRES(const EigenBase& A) : Base(A.derived()), m_restart(30),m_neig(0),m_r(0),m_maxNeig(5),m_isDeflAllocated(false),m_isDeflInitialized(false) {} ~DGMRES() {} /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A * \a x0 as an initial solution. 
* * \sa compute() */ template inline const internal::solve_retval_with_guess solveWithGuess(const MatrixBase& b, const Guess& x0) const { eigen_assert(m_isInitialized && "DGMRES is not initialized."); eigen_assert(Base::rows()==b.rows() && "DGMRES::solve(): invalid number of rows of the right hand side matrix b"); return internal::solve_retval_with_guess (*this, b.derived(), x0); } /** \internal */ template void _solveWithGuess(const Rhs& b, Dest& x) const { bool failed = false; for(int j=0; j void _solve(const Rhs& b, Dest& x) const { x = b; _solveWithGuess(b,x); } /** * Get the restart value */ int restart() { return m_restart; } /** * Set the restart value (default is 30) */ void set_restart(const int restart) { m_restart=restart; } /** * Set the number of eigenvalues to deflate at each restart */ void setEigenv(const int neig) { m_neig = neig; if (neig+1 > m_maxNeig) m_maxNeig = neig+1; // To allow for complex conjugates } /** * Get the size of the deflation subspace size */ int deflSize() {return m_r; } /** * Set the maximum size of the deflation subspace */ void setMaxEigenv(const int maxNeig) { m_maxNeig = maxNeig; } protected: // DGMRES algorithm template void dgmres(const MatrixType& mat,const Rhs& rhs, Dest& x, const Preconditioner& precond) const; // Perform one cycle of GMRES template int dgmresCycle(const MatrixType& mat, const Preconditioner& precond, Dest& x, DenseVector& r0, RealScalar& beta, const RealScalar& normRhs, int& nbIts) const; // Compute data to use for deflation int dgmresComputeDeflationData(const MatrixType& mat, const Preconditioner& precond, const Index& it, Index& neig) const; // Apply deflation to a vector template int dgmresApplyDeflation(const RhsType& In, DestType& Out) const; ComplexVector schurValues(const ComplexSchur& schurofH) const; ComplexVector schurValues(const RealSchur& schurofH) const; // Init data for deflation void dgmresInitDeflation(Index& rows) const; mutable DenseMatrix m_V; // Krylov basis vectors mutable DenseMatrix m_H; // Hessenberg matrix mutable DenseMatrix m_Hes; // Initial hessenberg matrix wihout Givens rotations applied mutable Index m_restart; // Maximum size of the Krylov subspace mutable DenseMatrix m_U; // Vectors that form the basis of the invariant subspace mutable DenseMatrix m_MU; // matrix operator applied to m_U (for next cycles) mutable DenseMatrix m_T; /* T=U^T*M^{-1}*A*U */ mutable PartialPivLU m_luT; // LU factorization of m_T mutable int m_neig; //Number of eigenvalues to extract at each restart mutable int m_r; // Current number of deflated eigenvalues, size of m_U mutable int m_maxNeig; // Maximum number of eigenvalues to deflate mutable RealScalar m_lambdaN; //Modulus of the largest eigenvalue of A mutable bool m_isDeflAllocated; mutable bool m_isDeflInitialized; //Adaptive strategy mutable RealScalar m_smv; // Smaller multiple of the remaining number of steps allowed mutable bool m_force; // Force the use of deflation at each restart }; /** * \brief Perform several cycles of restarted GMRES with modified Gram Schmidt, * * A right preconditioner is used combined with deflation. 
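 *
 * Concretely, once a deflation basis U with r columns has been extracted,
 * the deflation operator applied at each step is
 * \f$ M_d^{-1} x = x + U (\lambda_N T^{-1} - I) U^T x \f$,
 * with \f$ T = U^T M^{-1} A U \f$ and \f$ \lambda_N \f$ an estimate of the
 * modulus of the largest eigenvalue of A (see dgmresApplyDeflation() below);
 * the user-supplied preconditioner is then applied to the result.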
* */ template< typename _MatrixType, typename _Preconditioner> template void DGMRES<_MatrixType, _Preconditioner>::dgmres(const MatrixType& mat,const Rhs& rhs, Dest& x, const Preconditioner& precond) const { //Initialization int n = mat.rows(); DenseVector r0(n); int nbIts = 0; m_H.resize(m_restart+1, m_restart); m_Hes.resize(m_restart, m_restart); m_V.resize(n,m_restart+1); //Initial residual vector and intial norm x = precond.solve(x); r0 = rhs - mat * x; RealScalar beta = r0.norm(); RealScalar normRhs = rhs.norm(); m_error = beta/normRhs; if(m_error < m_tolerance) m_info = Success; else m_info = NoConvergence; // Iterative process while (nbIts < m_iterations && m_info == NoConvergence) { dgmresCycle(mat, precond, x, r0, beta, normRhs, nbIts); // Compute the new residual vector for the restart if (nbIts < m_iterations && m_info == NoConvergence) r0 = rhs - mat * x; } } /** * \brief Perform one restart cycle of DGMRES * \param mat The coefficient matrix * \param precond The preconditioner * \param x the new approximated solution * \param r0 The initial residual vector * \param beta The norm of the residual computed so far * \param normRhs The norm of the right hand side vector * \param nbIts The number of iterations */ template< typename _MatrixType, typename _Preconditioner> template int DGMRES<_MatrixType, _Preconditioner>::dgmresCycle(const MatrixType& mat, const Preconditioner& precond, Dest& x, DenseVector& r0, RealScalar& beta, const RealScalar& normRhs, int& nbIts) const { //Initialization DenseVector g(m_restart+1); // Right hand side of the least square problem g.setZero(); g(0) = Scalar(beta); m_V.col(0) = r0/beta; m_info = NoConvergence; std::vector >gr(m_restart); // Givens rotations int it = 0; // Number of inner iterations int n = mat.rows(); DenseVector tv1(n), tv2(n); //Temporary vectors while (m_info == NoConvergence && it < m_restart && nbIts < m_iterations) { // Apply preconditioner(s) at right if (m_isDeflInitialized ) { dgmresApplyDeflation(m_V.col(it), tv1); // Deflation tv2 = precond.solve(tv1); } else { tv2 = precond.solve(m_V.col(it)); // User's selected preconditioner } tv1 = mat * tv2; // Orthogonalize it with the previous basis in the basis using modified Gram-Schmidt Scalar coef; for (int i = 0; i <= it; ++i) { coef = tv1.dot(m_V.col(i)); tv1 = tv1 - coef * m_V.col(i); m_H(i,it) = coef; m_Hes(i,it) = coef; } // Normalize the vector coef = tv1.norm(); m_V.col(it+1) = tv1/coef; m_H(it+1, it) = coef; // m_Hes(it+1,it) = coef; // FIXME Check for happy breakdown // Update Hessenberg matrix with Givens rotations for (int i = 1; i <= it; ++i) { m_H.col(it).applyOnTheLeft(i-1,i,gr[i-1].adjoint()); } // Compute the new plane rotation gr[it].makeGivens(m_H(it, it), m_H(it+1,it)); // Apply the new rotation m_H.col(it).applyOnTheLeft(it,it+1,gr[it].adjoint()); g.applyOnTheLeft(it,it+1, gr[it].adjoint()); beta = std::abs(g(it+1)); m_error = beta/normRhs; std::cerr << nbIts << " Relative Residual Norm " << m_error << std::endl; it++; nbIts++; if (m_error < m_tolerance) { // The method has converged m_info = Success; break; } } // Compute the new coefficients by solving the least square problem // it++; //FIXME Check first if the matrix is singular ... 
zero diagonal DenseVector nrs(m_restart); nrs = m_H.topLeftCorner(it,it).template triangularView().solve(g.head(it)); // Form the new solution if (m_isDeflInitialized) { tv1 = m_V.leftCols(it) * nrs; dgmresApplyDeflation(tv1, tv2); x = x + precond.solve(tv2); } else x = x + precond.solve(m_V.leftCols(it) * nrs); // Go for a new cycle and compute data for deflation if(nbIts < m_iterations && m_info == NoConvergence && m_neig > 0 && (m_r+m_neig) < m_maxNeig) dgmresComputeDeflationData(mat, precond, it, m_neig); return 0; } template< typename _MatrixType, typename _Preconditioner> void DGMRES<_MatrixType, _Preconditioner>::dgmresInitDeflation(Index& rows) const { m_U.resize(rows, m_maxNeig); m_MU.resize(rows, m_maxNeig); m_T.resize(m_maxNeig, m_maxNeig); m_lambdaN = 0.0; m_isDeflAllocated = true; } template< typename _MatrixType, typename _Preconditioner> inline typename DGMRES<_MatrixType, _Preconditioner>::ComplexVector DGMRES<_MatrixType, _Preconditioner>::schurValues(const ComplexSchur& schurofH) const { return schurofH.matrixT().diagonal(); } template< typename _MatrixType, typename _Preconditioner> inline typename DGMRES<_MatrixType, _Preconditioner>::ComplexVector DGMRES<_MatrixType, _Preconditioner>::schurValues(const RealSchur& schurofH) const { typedef typename MatrixType::Index Index; const DenseMatrix& T = schurofH.matrixT(); Index it = T.rows(); ComplexVector eig(it); Index j = 0; while (j < it-1) { if (T(j+1,j) ==Scalar(0)) { eig(j) = std::complex(T(j,j),RealScalar(0)); j++; } else { eig(j) = std::complex(T(j,j),T(j+1,j)); eig(j+1) = std::complex(T(j,j+1),T(j+1,j+1)); j++; } } if (j < it-1) eig(j) = std::complex(T(j,j),RealScalar(0)); return eig; } template< typename _MatrixType, typename _Preconditioner> int DGMRES<_MatrixType, _Preconditioner>::dgmresComputeDeflationData(const MatrixType& mat, const Preconditioner& precond, const Index& it, Index& neig) const { // First, find the Schur form of the Hessenberg matrix H typename internal::conditional::IsComplex, ComplexSchur, RealSchur >::type schurofH; bool computeU = true; DenseMatrix matrixQ(it,it); matrixQ.setIdentity(); schurofH.computeFromHessenberg(m_Hes.topLeftCorner(it,it), matrixQ, computeU); ComplexVector eig(it); Matrixperm(it); eig = this->schurValues(schurofH); // Reorder the absolute values of Schur values DenseRealVector modulEig(it); for (int j=0; j template int DGMRES<_MatrixType, _Preconditioner>::dgmresApplyDeflation(const RhsType &x, DestType &y) const { DenseVector x1 = m_U.leftCols(m_r).transpose() * x; y = x + m_U.leftCols(m_r) * ( m_lambdaN * m_luT.solve(x1) - x1); return 0; } namespace internal { template struct solve_retval, Rhs> : solve_retval_base, Rhs> { typedef DGMRES<_MatrixType, _Preconditioner> Dec; EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) template void evalTo(Dest& dst) const { dec()._solve(rhs(),dst); } }; } // end namespace internal } // end namespace Eigen #endif RcppEigen/inst/include/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h0000644000176200001440000000624212253717461026773 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2011 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
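// Illustrative usage sketch (an assumption, not a statement of the official API contract):
// IncompleteLU exposes compute()/solve()/rows()/cols(), so it can in principle serve as the
// preconditioner template argument of the iterative solvers in this module, e.g. GMRES.
// The names A, b, x below are placeholders.
//
// #include <unsupported/Eigen/IterativeSolvers>
// using namespace Eigen;
//
// SparseMatrix<double> A;   // assembled elsewhere
// VectorXd b, x;            // right-hand side and solution
// GMRES<SparseMatrix<double>, IncompleteLU<double> > solver;
// solver.compute(A);        // also builds the incomplete LU factors
// x = solver.solve(b);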
#ifndef EIGEN_INCOMPLETE_LU_H #define EIGEN_INCOMPLETE_LU_H namespace Eigen { template class IncompleteLU { typedef _Scalar Scalar; typedef Matrix Vector; typedef typename Vector::Index Index; typedef SparseMatrix FactorType; public: typedef Matrix MatrixType; IncompleteLU() : m_isInitialized(false) {} template IncompleteLU(const MatrixType& mat) : m_isInitialized(false) { compute(mat); } Index rows() const { return m_lu.rows(); } Index cols() const { return m_lu.cols(); } template IncompleteLU& compute(const MatrixType& mat) { m_lu = mat; int size = mat.cols(); Vector diag(size); for(int i=0; i void _solve(const Rhs& b, Dest& x) const { x = m_lu.template triangularView().solve(b); x = m_lu.template triangularView().solve(x); } template inline const internal::solve_retval solve(const MatrixBase& b) const { eigen_assert(m_isInitialized && "IncompleteLU is not initialized."); eigen_assert(cols()==b.rows() && "IncompleteLU::solve(): invalid number of rows of the right hand side matrix b"); return internal::solve_retval(*this, b.derived()); } protected: FactorType m_lu; bool m_isInitialized; }; namespace internal { template struct solve_retval, Rhs> : solve_retval_base, Rhs> { typedef IncompleteLU<_MatrixType> Dec; EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) template void evalTo(Dest& dst) const { dec()._solve(rhs(),dst); } }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_INCOMPLETE_LU_H RcppEigen/inst/include/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h0000644000176200001440000002316612253717461030240 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Désiré Nuentsa-Wakam // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_INCOMPLETE_CHOlESKY_H #define EIGEN_INCOMPLETE_CHOlESKY_H #include "Eigen/src/IterativeLinearSolvers/IncompleteLUT.h" #include #include namespace Eigen { /** * \brief Modified Incomplete Cholesky with dual threshold * * References : C-J. Lin and J. J. Moré, Incomplete Cholesky Factorizations with * Limited memory, SIAM J. Sci. Comput. 21(1), pp. 24-45, 1999 * * \tparam _MatrixType The type of the sparse matrix. It should be a symmetric * matrix. It is advised to give a row-oriented sparse matrix * \tparam _UpLo The triangular part of the matrix to reference. * \tparam _OrderingType */ template > class IncompleteCholesky : internal::noncopyable { public: typedef SparseMatrix MatrixType; typedef _OrderingType OrderingType; typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::Index Index; typedef PermutationMatrix PermutationType; typedef Matrix ScalarType; typedef Matrix IndexType; typedef std::vector > VectorList; enum { UpLo = _UpLo }; public: IncompleteCholesky() : m_shift(1),m_factorizationIsOk(false) {} IncompleteCholesky(const MatrixType& matrix) : m_shift(1),m_factorizationIsOk(false) { compute(matrix); } Index rows() const { return m_L.rows(); } Index cols() const { return m_L.cols(); } /** \brief Reports whether previous computation was successful. * * \returns \c Success if computation was succesful, * \c NumericalIssue if the matrix appears to be negative. 
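    *
    * A hedged illustration (A is assumed to be a symmetric SparseMatrix<double> with its
    * lower triangle stored; the default ordering template argument is used):
    * \code
    * IncompleteCholesky<double, Lower> ichol;
    * ichol.compute(A);
    * if (ichol.info() != Success) {
    *   // the shifted factorization broke down; increase the shift via setShift() and retry
    * }
    * \endcode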
*/ ComputationInfo info() const { eigen_assert(m_isInitialized && "IncompleteLLT is not initialized."); return m_info; } /** * \brief Set the initial shift parameter */ void setShift( Scalar shift) { m_shift = shift; } /** * \brief Computes the fill reducing permutation vector. */ template void analyzePattern(const MatrixType& mat) { OrderingType ord; ord(mat.template selfadjointView(), m_perm); m_analysisIsOk = true; } template void factorize(const MatrixType& amat); template void compute (const MatrixType& matrix) { analyzePattern(matrix); factorize(matrix); } template void _solve(const Rhs& b, Dest& x) const { eigen_assert(m_factorizationIsOk && "factorize() should be called first"); if (m_perm.rows() == b.rows()) x = m_perm.inverse() * b; else x = b; x = m_scal.asDiagonal() * x; x = m_L.template triangularView().solve(x); x = m_L.adjoint().template triangularView().solve(x); if (m_perm.rows() == b.rows()) x = m_perm * x; x = m_scal.asDiagonal() * x; } template inline const internal::solve_retval solve(const MatrixBase& b) const { eigen_assert(m_factorizationIsOk && "IncompleteLLT did not succeed"); eigen_assert(m_isInitialized && "IncompleteLLT is not initialized."); eigen_assert(cols()==b.rows() && "IncompleteLLT::solve(): invalid number of rows of the right hand side matrix b"); return internal::solve_retval(*this, b.derived()); } protected: SparseMatrix m_L; // The lower part stored in CSC ScalarType m_scal; // The vector for scaling the matrix Scalar m_shift; //The initial shift parameter bool m_analysisIsOk; bool m_factorizationIsOk; bool m_isInitialized; ComputationInfo m_info; PermutationType m_perm; private: template inline void updateList(const IdxType& colPtr, IdxType& rowIdx, SclType& vals, const Index& col, const Index& jk, IndexType& firstElt, VectorList& listCol); }; template template void IncompleteCholesky::factorize(const _MatrixType& mat) { using std::sqrt; using std::min; eigen_assert(m_analysisIsOk && "analyzePattern() should be called first"); // Dropping strategies : Keep only the p largest elements per column, where p is the number of elements in the column of the original matrix. 
Other strategies will be added // Apply the fill-reducing permutation computed in analyzePattern() if (m_perm.rows() == mat.rows() ) // To detect the null permutation m_L.template selfadjointView() = mat.template selfadjointView<_UpLo>().twistedBy(m_perm); else m_L.template selfadjointView() = mat.template selfadjointView<_UpLo>(); Index n = m_L.cols(); Index nnz = m_L.nonZeros(); Map vals(m_L.valuePtr(), nnz); //values Map rowIdx(m_L.innerIndexPtr(), nnz); //Row indices Map colPtr( m_L.outerIndexPtr(), n+1); // Pointer to the beginning of each row IndexType firstElt(n-1); // for each j, points to the next entry in vals that will be used in the factorization VectorList listCol(n); // listCol(j) is a linked list of columns to update column j ScalarType curCol(n); // Store a nonzero values in each column IndexType irow(n); // Row indices of nonzero elements in each column // Computes the scaling factors m_scal.resize(n); for (int j = 0; j < n; j++) { m_scal(j) = m_L.col(j).norm(); m_scal(j) = sqrt(m_scal(j)); } // Scale and compute the shift for the matrix Scalar mindiag = vals[0]; for (int j = 0; j < n; j++){ for (int k = colPtr[j]; k < colPtr[j+1]; k++) vals[k] /= (m_scal(j) * m_scal(rowIdx[k])); mindiag = (min)(vals[colPtr[j]], mindiag); } if(mindiag < Scalar(0.)) m_shift = m_shift - mindiag; // Apply the shift to the diagonal elements of the matrix for (int j = 0; j < n; j++) vals[colPtr[j]] += m_shift; // jki version of the Cholesky factorization for (int j=0; j < n; ++j) { //Left-looking factorize the column j // First, load the jth column into curCol Scalar diag = vals[colPtr[j]]; // It is assumed that only the lower part is stored curCol.setZero(); irow.setLinSpaced(n,0,n-1); for (int i = colPtr[j] + 1; i < colPtr[j+1]; i++) { curCol(rowIdx[i]) = vals[i]; irow(rowIdx[i]) = rowIdx[i]; } std::list::iterator k; // Browse all previous columns that will update column j for(k = listCol[j].begin(); k != listCol[j].end(); k++) { int jk = firstElt(*k); // First element to use in the column jk += 1; for (int i = jk; i < colPtr[*k+1]; i++) { curCol(rowIdx[i]) -= vals[i] * vals[jk] ; } updateList(colPtr,rowIdx,vals, *k, jk, firstElt, listCol); } // Scale the current column if(RealScalar(diag) <= 0) { std::cerr << "\nNegative diagonal during Incomplete factorization... 
"<< j << "\n"; m_info = NumericalIssue; return; } RealScalar rdiag = sqrt(RealScalar(diag)); vals[colPtr[j]] = rdiag; for (int i = j+1; i < n; i++) { //Scale curCol(i) /= rdiag; //Update the remaining diagonals with curCol vals[colPtr[i]] -= curCol(i) * curCol(i); } // Select the largest p elements // p is the original number of elements in the column (without the diagonal) int p = colPtr[j+1] - colPtr[j] - 1 ; internal::QuickSplit(curCol, irow, p); // Insert the largest p elements in the matrix int cpt = 0; for (int i = colPtr[j]+1; i < colPtr[j+1]; i++) { vals[i] = curCol(cpt); rowIdx[i] = irow(cpt); cpt ++; } // Get the first smallest row index and put it after the diagonal element Index jk = colPtr(j)+1; updateList(colPtr,rowIdx,vals,j,jk,firstElt,listCol); } m_factorizationIsOk = true; m_isInitialized = true; m_info = Success; } template template inline void IncompleteCholesky::updateList(const IdxType& colPtr, IdxType& rowIdx, SclType& vals, const Index& col, const Index& jk, IndexType& firstElt, VectorList& listCol) { if (jk < colPtr(col+1) ) { Index p = colPtr(col+1) - jk; Index minpos; rowIdx.segment(jk,p).minCoeff(&minpos); minpos += jk; if (rowIdx(minpos) != rowIdx(jk)) { //Swap std::swap(rowIdx(jk),rowIdx(minpos)); std::swap(vals(jk),vals(minpos)); } firstElt(col) = jk; listCol[rowIdx(jk)].push_back(col); } } namespace internal { template struct solve_retval, Rhs> : solve_retval_base, Rhs> { typedef IncompleteCholesky<_Scalar, _UpLo, OrderingType> Dec; EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) template void evalTo(Dest& dst) const { dec()._solve(rhs(),dst); } }; } // end namespace internal } // end namespace Eigen #endif RcppEigen/inst/include/unsupported/Eigen/src/IterativeSolvers/MINRES.h0000644000176200001440000003271412647570720025435 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Giacomo Po // Copyright (C) 2011 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MINRES_H_ #define EIGEN_MINRES_H_ namespace Eigen { namespace internal { /** \internal Low-level MINRES algorithm * \param mat The matrix A * \param rhs The right hand side vector b * \param x On input and initial solution, on output the computed solution. * \param precond A right preconditioner being able to efficiently solve for an * approximation of Ax=b (regardless of b) * \param iters On input the max number of iteration, on output the number of performed iterations. * \param tol_error On input the tolerance error, on output an estimation of the relative error. 
*/ template EIGEN_DONT_INLINE void minres(const MatrixType& mat, const Rhs& rhs, Dest& x, const Preconditioner& precond, int& iters, typename Dest::RealScalar& tol_error) { using std::sqrt; typedef typename Dest::RealScalar RealScalar; typedef typename Dest::Scalar Scalar; typedef Matrix VectorType; // Check for zero rhs const RealScalar rhsNorm2(rhs.squaredNorm()); if(rhsNorm2 == 0) { x.setZero(); iters = 0; tol_error = 0; return; } // initialize const int maxIters(iters); // initialize maxIters to iters const int N(mat.cols()); // the size of the matrix const RealScalar threshold2(tol_error*tol_error*rhsNorm2); // convergence threshold (compared to residualNorm2) // Initialize preconditioned Lanczos VectorType v_old(N); // will be initialized inside loop VectorType v( VectorType::Zero(N) ); //initialize v VectorType v_new(rhs-mat*x); //initialize v_new RealScalar residualNorm2(v_new.squaredNorm()); VectorType w(N); // will be initialized inside loop VectorType w_new(precond.solve(v_new)); // initialize w_new // RealScalar beta; // will be initialized inside loop RealScalar beta_new2(v_new.dot(w_new)); eigen_assert(beta_new2 >= 0.0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE"); RealScalar beta_new(sqrt(beta_new2)); const RealScalar beta_one(beta_new); v_new /= beta_new; w_new /= beta_new; // Initialize other variables RealScalar c(1.0); // the cosine of the Givens rotation RealScalar c_old(1.0); RealScalar s(0.0); // the sine of the Givens rotation RealScalar s_old(0.0); // the sine of the Givens rotation VectorType p_oold(N); // will be initialized in loop VectorType p_old(VectorType::Zero(N)); // initialize p_old=0 VectorType p(p_old); // initialize p=0 RealScalar eta(1.0); iters = 0; // reset iters while ( iters < maxIters ) { // Preconditioned Lanczos /* Note that there are 4 variants on the Lanczos algorithm. These are * described in Paige, C. C. (1972). Computational variants of * the Lanczos method for the eigenproblem. IMA Journal of Applied * Mathematics, 10(3), 373–381. The current implementation corresponds * to the case A(2,7) in the paper. It also corresponds to * algorithm 6.14 in Y. Saad, Iterative Methods for Sparse Linear * Systems, 2003 p.173. For the preconditioned version see * A. Greenbaum, Iterative Methods for Solving Linear Systems, SIAM (1987). 
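     *
     * As a reading aid for the statements that follow (hedged paraphrase, with M the
     * preconditioner):
     *   v_{k+1} = A w_k - beta_k v_{k-1},   alpha_k = <v_{k+1}, w_k>,
     *   v_{k+1} -= alpha_k v_k,             w_{k+1} = M^{-1} v_{k+1},
     *   beta_{k+1} = sqrt(<v_{k+1}, w_{k+1}>),
     * after which v_{k+1} and w_{k+1} are normalised by beta_{k+1}.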
*/ const RealScalar beta(beta_new); v_old = v; // update: at first time step, this makes v_old = 0 so value of beta doesn't matter // const VectorType v_old(v); // NOT SURE IF CREATING v_old EVERY ITERATION IS EFFICIENT v = v_new; // update w = w_new; // update // const VectorType w(w_new); // NOT SURE IF CREATING w EVERY ITERATION IS EFFICIENT v_new.noalias() = mat*w - beta*v_old; // compute v_new const RealScalar alpha = v_new.dot(w); v_new -= alpha*v; // overwrite v_new w_new = precond.solve(v_new); // overwrite w_new beta_new2 = v_new.dot(w_new); // compute beta_new eigen_assert(beta_new2 >= 0.0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE"); beta_new = sqrt(beta_new2); // compute beta_new v_new /= beta_new; // overwrite v_new for next iteration w_new /= beta_new; // overwrite w_new for next iteration // Givens rotation const RealScalar r2 =s*alpha+c*c_old*beta; // s, s_old, c and c_old are still from previous iteration const RealScalar r3 =s_old*beta; // s, s_old, c and c_old are still from previous iteration const RealScalar r1_hat=c*alpha-c_old*s*beta; const RealScalar r1 =sqrt( std::pow(r1_hat,2) + std::pow(beta_new,2) ); c_old = c; // store for next iteration s_old = s; // store for next iteration c=r1_hat/r1; // new cosine s=beta_new/r1; // new sine // Update solution p_oold = p_old; // const VectorType p_oold(p_old); // NOT SURE IF CREATING p_oold EVERY ITERATION IS EFFICIENT p_old = p; p.noalias()=(w-r2*p_old-r3*p_oold) /r1; // IS NOALIAS REQUIRED? x += beta_one*c*eta*p; /* Update the squared residual. Note that this is the estimated residual. The real residual |Ax-b|^2 may be slightly larger */ residualNorm2 *= s*s; if ( residualNorm2 < threshold2) { break; } eta=-s*eta; // update eta iters++; // increment iteration number (for output purposes) } /* Compute error. Note that this is the estimated error. The real error |Ax-b|/|b| may be slightly larger */ tol_error = std::sqrt(residualNorm2 / rhsNorm2); } } template< typename _MatrixType, int _UpLo=Lower, typename _Preconditioner = IdentityPreconditioner> // typename _Preconditioner = IdentityPreconditioner > // preconditioner must be positive definite class MINRES; namespace internal { template< typename _MatrixType, int _UpLo, typename _Preconditioner> struct traits > { typedef _MatrixType MatrixType; typedef _Preconditioner Preconditioner; }; } /** \ingroup IterativeLinearSolvers_Module * \brief A minimal residual solver for sparse symmetric problems * * This class allows to solve for A.x = b sparse linear problems using the MINRES algorithm * of Paige and Saunders (1975). The sparse matrix A must be symmetric (possibly indefinite). * The vectors x and b can be either dense or sparse. * * \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix. * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower * or Upper. Default is Lower. * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner * * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations() * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations * and NumTraits::epsilon() for the tolerance. * * This class can be used as the direct solver classes. 
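  *
  * With the template arguments written out explicitly (a hedged restatement of the
  * example that follows, assuming Eigen 3.2.x), the solver is typically instantiated as
  * \code
  * MINRES<SparseMatrix<double>, Lower, IdentityPreconditioner> mr;
  * \endcode
  * which is equivalent to MINRES<SparseMatrix<double> > with the defaults.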
Here is a typical usage example: * \code * int n = 10000; * VectorXd x(n), b(n); * SparseMatrix A(n,n); * // fill A and b * MINRES > mr; * mr.compute(A); * x = mr.solve(b); * std::cout << "#iterations: " << mr.iterations() << std::endl; * std::cout << "estimated error: " << mr.error() << std::endl; * // update b, and solve again * x = mr.solve(b); * \endcode * * By default the iterations start with x=0 as an initial guess of the solution. * One can control the start using the solveWithGuess() method. * * \sa class ConjugateGradient, BiCGSTAB, SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner */ template< typename _MatrixType, int _UpLo, typename _Preconditioner> class MINRES : public IterativeSolverBase > { typedef IterativeSolverBase Base; using Base::mp_matrix; using Base::m_error; using Base::m_iterations; using Base::m_info; using Base::m_isInitialized; public: typedef _MatrixType MatrixType; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Index Index; typedef typename MatrixType::RealScalar RealScalar; typedef _Preconditioner Preconditioner; enum {UpLo = _UpLo}; public: /** Default constructor. */ MINRES() : Base() {} /** Initialize the solver with matrix \a A for further \c Ax=b solving. * * This constructor is a shortcut for the default constructor followed * by a call to compute(). * * \warning this class stores a reference to the matrix A as well as some * precomputed values that depend on it. Therefore, if \a A is changed * this class becomes invalid. Call compute() to update it with the new * matrix A, or modify a copy of A. */ template explicit MINRES(const EigenBase& A) : Base(A.derived()) {} /** Destructor. */ ~MINRES(){} /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A * \a x0 as an initial solution. * * \sa compute() */ template inline const internal::solve_retval_with_guess solveWithGuess(const MatrixBase& b, const Guess& x0) const { eigen_assert(m_isInitialized && "MINRES is not initialized."); eigen_assert(Base::rows()==b.rows() && "MINRES::solve(): invalid number of rows of the right hand side matrix b"); return internal::solve_retval_with_guess (*this, b.derived(), x0); } /** \internal */ template void _solveWithGuess(const Rhs& b, Dest& x) const { typedef typename internal::conditional >::type MatrixWrapperType; m_iterations = Base::maxIterations(); m_error = Base::m_tolerance; for(int j=0; j void _solve(const Rhs& b, Dest& x) const { x.setZero(); _solveWithGuess(b,x); } protected: }; namespace internal { template struct solve_retval, Rhs> : solve_retval_base, Rhs> { typedef MINRES<_MatrixType,_UpLo,_Preconditioner> Dec; EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) template void evalTo(Dest& dst) const { dec()._solve(rhs(),dst); } }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_MINRES_H RcppEigen/inst/include/unsupported/Eigen/src/MatrixFunctions/0000755000176200001440000000000012446006401024066 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/src/MatrixFunctions/StemFunction.h0000644000176200001440000000400512253717461026667 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010 Jitse Niesen // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
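// Illustrative note (hedged): the stem functions defined in this header are intended to be
// passed to MatrixBase::matrixFunction() from the MatrixFunctions module, which expects a
// scalar function of a complex argument together with a derivative order, e.g.
//
//   #include <unsupported/Eigen/MatrixFunctions>
//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(4,4);
//   Eigen::MatrixXd cosA = A.matrixFunction(Eigen::StdStemFunctions<std::complex<double> >::cos);
//
// (The exact signature expected by matrixFunction() is an assumption here; see
// MatrixFunction.h in this module for the authoritative interface.)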
#ifndef EIGEN_STEM_FUNCTION #define EIGEN_STEM_FUNCTION namespace Eigen { /** \ingroup MatrixFunctions_Module * \brief Stem functions corresponding to standard mathematical functions. */ template class StdStemFunctions { public: /** \brief The exponential function (and its derivatives). */ static Scalar exp(Scalar x, int) { return std::exp(x); } /** \brief Cosine (and its derivatives). */ static Scalar cos(Scalar x, int n) { Scalar res; switch (n % 4) { case 0: res = std::cos(x); break; case 1: res = -std::sin(x); break; case 2: res = -std::cos(x); break; case 3: res = std::sin(x); break; } return res; } /** \brief Sine (and its derivatives). */ static Scalar sin(Scalar x, int n) { Scalar res; switch (n % 4) { case 0: res = std::sin(x); break; case 1: res = std::cos(x); break; case 2: res = -std::sin(x); break; case 3: res = -std::cos(x); break; } return res; } /** \brief Hyperbolic cosine (and its derivatives). */ static Scalar cosh(Scalar x, int n) { Scalar res; switch (n % 2) { case 0: res = std::cosh(x); break; case 1: res = std::sinh(x); break; } return res; } /** \brief Hyperbolic sine (and its derivatives). */ static Scalar sinh(Scalar x, int n) { Scalar res; switch (n % 2) { case 0: res = std::sinh(x); break; case 1: res = std::cosh(x); break; } return res; } }; // end of class StdStemFunctions } // end namespace Eigen #endif // EIGEN_STEM_FUNCTION RcppEigen/inst/include/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h0000644000176200001440000003521212253717461027730 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009, 2010 Jitse Niesen // Copyright (C) 2011 Chen-Pang He // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MATRIX_EXPONENTIAL #define EIGEN_MATRIX_EXPONENTIAL #include "StemFunction.h" namespace Eigen { /** \ingroup MatrixFunctions_Module * \brief Class for computing the matrix exponential. * \tparam MatrixType type of the argument of the exponential, * expected to be an instantiation of the Matrix class template. */ template class MatrixExponential { public: /** \brief Constructor. * * The class stores a reference to \p M, so it should not be * changed (or destroyed) before compute() is called. * * \param[in] M matrix whose exponential is to be computed. */ MatrixExponential(const MatrixType &M); /** \brief Computes the matrix exponential. * * \param[out] result the matrix exponential of \p M in the constructor. */ template void compute(ResultType &result); private: // Prevent copying MatrixExponential(const MatrixExponential&); MatrixExponential& operator=(const MatrixExponential&); /** \brief Compute the (3,3)-Padé approximant to the exponential. * * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$. * * \param[in] A Argument of matrix exponential */ void pade3(const MatrixType &A); /** \brief Compute the (5,5)-Padé approximant to the exponential. * * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$. * * \param[in] A Argument of matrix exponential */ void pade5(const MatrixType &A); /** \brief Compute the (7,7)-Padé approximant to the exponential. * * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$. 
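    *
    * Equivalently (a hedged restatement of the convention used throughout this file):
    * writing the degree-k Padé approximant as \f$ Q_k(A)^{-1} P_k(A) \f$, \c m_U collects
    * the odd-degree terms and \c m_V the even-degree terms, so \f$ P_k(A) = V + U \f$ and
    * \f$ Q_k(A) = V - U \f$; compute() then solves \f$ (V-U)R = (V+U) \f$ with a
    * partial-pivoting LU factorization before the final squaring steps.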
* * \param[in] A Argument of matrix exponential */ void pade7(const MatrixType &A); /** \brief Compute the (9,9)-Padé approximant to the exponential. * * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$. * * \param[in] A Argument of matrix exponential */ void pade9(const MatrixType &A); /** \brief Compute the (13,13)-Padé approximant to the exponential. * * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$. * * \param[in] A Argument of matrix exponential */ void pade13(const MatrixType &A); /** \brief Compute the (17,17)-Padé approximant to the exponential. * * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$. * * This function activates only if your long double is double-double or quadruple. * * \param[in] A Argument of matrix exponential */ void pade17(const MatrixType &A); /** \brief Compute Padé approximant to the exponential. * * Computes \c m_U, \c m_V and \c m_squarings such that * \f$ (V+U)(V-U)^{-1} \f$ is a Padé of * \f$ \exp(2^{-\mbox{squarings}}M) \f$ around \f$ M = 0 \f$. The * degree of the Padé approximant and the value of * squarings are chosen such that the approximation error is no * more than the round-off error. * * The argument of this function should correspond with the (real * part of) the entries of \c m_M. It is used to select the * correct implementation using overloading. */ void computeUV(double); /** \brief Compute Padé approximant to the exponential. * * \sa computeUV(double); */ void computeUV(float); /** \brief Compute Padé approximant to the exponential. * * \sa computeUV(double); */ void computeUV(long double); typedef typename internal::traits::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef typename std::complex ComplexScalar; /** \brief Reference to matrix whose exponential is to be computed. */ typename internal::nested::type m_M; /** \brief Odd-degree terms in numerator of Padé approximant. */ MatrixType m_U; /** \brief Even-degree terms in numerator of Padé approximant. */ MatrixType m_V; /** \brief Used for temporary storage. */ MatrixType m_tmp1; /** \brief Used for temporary storage. */ MatrixType m_tmp2; /** \brief Identity matrix of the same size as \c m_M. */ MatrixType m_Id; /** \brief Number of squarings required in the last step. */ int m_squarings; /** \brief L1 norm of m_M. 
*/ RealScalar m_l1norm; }; template MatrixExponential::MatrixExponential(const MatrixType &M) : m_M(M), m_U(M.rows(),M.cols()), m_V(M.rows(),M.cols()), m_tmp1(M.rows(),M.cols()), m_tmp2(M.rows(),M.cols()), m_Id(MatrixType::Identity(M.rows(), M.cols())), m_squarings(0), m_l1norm(M.cwiseAbs().colwise().sum().maxCoeff()) { /* empty body */ } template template void MatrixExponential::compute(ResultType &result) { #if LDBL_MANT_DIG > 112 // rarely happens if(sizeof(RealScalar) > 14) { result = m_M.matrixFunction(StdStemFunctions::exp); return; } #endif computeUV(RealScalar()); m_tmp1 = m_U + m_V; // numerator of Pade approximant m_tmp2 = -m_U + m_V; // denominator of Pade approximant result = m_tmp2.partialPivLu().solve(m_tmp1); for (int i=0; i EIGEN_STRONG_INLINE void MatrixExponential::pade3(const MatrixType &A) { const RealScalar b[] = {120., 60., 12., 1.}; m_tmp1.noalias() = A * A; m_tmp2 = b[3]*m_tmp1 + b[1]*m_Id; m_U.noalias() = A * m_tmp2; m_V = b[2]*m_tmp1 + b[0]*m_Id; } template EIGEN_STRONG_INLINE void MatrixExponential::pade5(const MatrixType &A) { const RealScalar b[] = {30240., 15120., 3360., 420., 30., 1.}; MatrixType A2 = A * A; m_tmp1.noalias() = A2 * A2; m_tmp2 = b[5]*m_tmp1 + b[3]*A2 + b[1]*m_Id; m_U.noalias() = A * m_tmp2; m_V = b[4]*m_tmp1 + b[2]*A2 + b[0]*m_Id; } template EIGEN_STRONG_INLINE void MatrixExponential::pade7(const MatrixType &A) { const RealScalar b[] = {17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.}; MatrixType A2 = A * A; MatrixType A4 = A2 * A2; m_tmp1.noalias() = A4 * A2; m_tmp2 = b[7]*m_tmp1 + b[5]*A4 + b[3]*A2 + b[1]*m_Id; m_U.noalias() = A * m_tmp2; m_V = b[6]*m_tmp1 + b[4]*A4 + b[2]*A2 + b[0]*m_Id; } template EIGEN_STRONG_INLINE void MatrixExponential::pade9(const MatrixType &A) { const RealScalar b[] = {17643225600., 8821612800., 2075673600., 302702400., 30270240., 2162160., 110880., 3960., 90., 1.}; MatrixType A2 = A * A; MatrixType A4 = A2 * A2; MatrixType A6 = A4 * A2; m_tmp1.noalias() = A6 * A2; m_tmp2 = b[9]*m_tmp1 + b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*m_Id; m_U.noalias() = A * m_tmp2; m_V = b[8]*m_tmp1 + b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*m_Id; } template EIGEN_STRONG_INLINE void MatrixExponential::pade13(const MatrixType &A) { const RealScalar b[] = {64764752532480000., 32382376266240000., 7771770303897600., 1187353796428800., 129060195264000., 10559470521600., 670442572800., 33522128640., 1323241920., 40840800., 960960., 16380., 182., 1.}; MatrixType A2 = A * A; MatrixType A4 = A2 * A2; m_tmp1.noalias() = A4 * A2; m_V = b[13]*m_tmp1 + b[11]*A4 + b[9]*A2; // used for temporary storage m_tmp2.noalias() = m_tmp1 * m_V; m_tmp2 += b[7]*m_tmp1 + b[5]*A4 + b[3]*A2 + b[1]*m_Id; m_U.noalias() = A * m_tmp2; m_tmp2 = b[12]*m_tmp1 + b[10]*A4 + b[8]*A2; m_V.noalias() = m_tmp1 * m_tmp2; m_V += b[6]*m_tmp1 + b[4]*A4 + b[2]*A2 + b[0]*m_Id; } #if LDBL_MANT_DIG > 64 template EIGEN_STRONG_INLINE void MatrixExponential::pade17(const MatrixType &A) { const RealScalar b[] = {830034394580628357120000.L, 415017197290314178560000.L, 100610229646136770560000.L, 15720348382208870400000.L, 1774878043152614400000.L, 153822763739893248000.L, 10608466464820224000.L, 595373117923584000.L, 27563570274240000.L, 1060137318240000.L, 33924394183680.L, 899510451840.L, 19554575040.L, 341863200.L, 4651200.L, 46512.L, 306.L, 1.L}; MatrixType A2 = A * A; MatrixType A4 = A2 * A2; MatrixType A6 = A4 * A2; m_tmp1.noalias() = A4 * A4; m_V = b[17]*m_tmp1 + b[15]*A6 + b[13]*A4 + b[11]*A2; // used for temporary storage m_tmp2.noalias() = m_tmp1 * m_V; m_tmp2 += b[9]*m_tmp1 + 
b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*m_Id; m_U.noalias() = A * m_tmp2; m_tmp2 = b[16]*m_tmp1 + b[14]*A6 + b[12]*A4 + b[10]*A2; m_V.noalias() = m_tmp1 * m_tmp2; m_V += b[8]*m_tmp1 + b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*m_Id; } #endif template void MatrixExponential::computeUV(float) { using std::frexp; using std::pow; if (m_l1norm < 4.258730016922831e-001) { pade3(m_M); } else if (m_l1norm < 1.880152677804762e+000) { pade5(m_M); } else { const float maxnorm = 3.925724783138660f; frexp(m_l1norm / maxnorm, &m_squarings); if (m_squarings < 0) m_squarings = 0; MatrixType A = m_M / pow(Scalar(2), m_squarings); pade7(A); } } template void MatrixExponential::computeUV(double) { using std::frexp; using std::pow; if (m_l1norm < 1.495585217958292e-002) { pade3(m_M); } else if (m_l1norm < 2.539398330063230e-001) { pade5(m_M); } else if (m_l1norm < 9.504178996162932e-001) { pade7(m_M); } else if (m_l1norm < 2.097847961257068e+000) { pade9(m_M); } else { const double maxnorm = 5.371920351148152; frexp(m_l1norm / maxnorm, &m_squarings); if (m_squarings < 0) m_squarings = 0; MatrixType A = m_M / pow(Scalar(2), m_squarings); pade13(A); } } template void MatrixExponential::computeUV(long double) { using std::frexp; using std::pow; #if LDBL_MANT_DIG == 53 // double precision computeUV(double()); #elif LDBL_MANT_DIG <= 64 // extended precision if (m_l1norm < 4.1968497232266989671e-003L) { pade3(m_M); } else if (m_l1norm < 1.1848116734693823091e-001L) { pade5(m_M); } else if (m_l1norm < 5.5170388480686700274e-001L) { pade7(m_M); } else if (m_l1norm < 1.3759868875587845383e+000L) { pade9(m_M); } else { const long double maxnorm = 4.0246098906697353063L; frexp(m_l1norm / maxnorm, &m_squarings); if (m_squarings < 0) m_squarings = 0; MatrixType A = m_M / pow(Scalar(2), m_squarings); pade13(A); } #elif LDBL_MANT_DIG <= 106 // double-double if (m_l1norm < 3.2787892205607026992947488108213e-005L) { pade3(m_M); } else if (m_l1norm < 6.4467025060072760084130906076332e-003L) { pade5(m_M); } else if (m_l1norm < 6.8988028496595374751374122881143e-002L) { pade7(m_M); } else if (m_l1norm < 2.7339737518502231741495857201670e-001L) { pade9(m_M); } else if (m_l1norm < 1.3203382096514474905666448850278e+000L) { pade13(m_M); } else { const long double maxnorm = 3.2579440895405400856599663723517L; frexp(m_l1norm / maxnorm, &m_squarings); if (m_squarings < 0) m_squarings = 0; MatrixType A = m_M / pow(Scalar(2), m_squarings); pade17(A); } #elif LDBL_MANT_DIG <= 112 // quadruple precison if (m_l1norm < 1.639394610288918690547467954466970e-005L) { pade3(m_M); } else if (m_l1norm < 4.253237712165275566025884344433009e-003L) { pade5(m_M); } else if (m_l1norm < 5.125804063165764409885122032933142e-002L) { pade7(m_M); } else if (m_l1norm < 2.170000765161155195453205651889853e-001L) { pade9(m_M); } else if (m_l1norm < 1.125358383453143065081397882891878e+000L) { pade13(m_M); } else { const long double maxnorm = 2.884233277829519311757165057717815L; frexp(m_l1norm / maxnorm, &m_squarings); if (m_squarings < 0) m_squarings = 0; MatrixType A = m_M / pow(Scalar(2), m_squarings); pade17(A); } #else // this case should be handled in compute() eigen_assert(false && "Bug in MatrixExponential"); #endif // LDBL_MANT_DIG } /** \ingroup MatrixFunctions_Module * * \brief Proxy for the matrix exponential of some matrix (expression). * * \tparam Derived Type of the argument to the matrix exponential. 
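  *
  * A minimal usage sketch (illustrative; assumes the MatrixFunctions module header is
  * included so that MatrixBase::exp(), defined at the end of this file, is available):
  * \code
  * Eigen::MatrixXd A(2,2);
  * A << 0, -1,
  *      1,  0;
  * Eigen::MatrixXd expA = A.exp();   // the plane rotation by one radian
  * \endcode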
* * This class holds the argument to the matrix exponential until it * is assigned or evaluated for some other reason (so the argument * should not be changed in the meantime). It is the return type of * MatrixBase::exp() and most of the time this is the only way it is * used. */ template struct MatrixExponentialReturnValue : public ReturnByValue > { typedef typename Derived::Index Index; public: /** \brief Constructor. * * \param[in] src %Matrix (expression) forming the argument of the * matrix exponential. */ MatrixExponentialReturnValue(const Derived& src) : m_src(src) { } /** \brief Compute the matrix exponential. * * \param[out] result the matrix exponential of \p src in the * constructor. */ template inline void evalTo(ResultType& result) const { const typename Derived::PlainObject srcEvaluated = m_src.eval(); MatrixExponential me(srcEvaluated); me.compute(result); } Index rows() const { return m_src.rows(); } Index cols() const { return m_src.cols(); } protected: const Derived& m_src; private: MatrixExponentialReturnValue& operator=(const MatrixExponentialReturnValue&); }; namespace internal { template struct traits > { typedef typename Derived::PlainObject ReturnType; }; } template const MatrixExponentialReturnValue MatrixBase::exp() const { eigen_assert(rows() == cols()); return MatrixExponentialReturnValue(derived()); } } // end namespace Eigen #endif // EIGEN_MATRIX_EXPONENTIAL RcppEigen/inst/include/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h0000644000176200001440000004042412253717461027547 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2011 Jitse Niesen // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MATRIX_SQUARE_ROOT #define EIGEN_MATRIX_SQUARE_ROOT namespace Eigen { /** \ingroup MatrixFunctions_Module * \brief Class for computing matrix square roots of upper quasi-triangular matrices. * \tparam MatrixType type of the argument of the matrix square root, * expected to be an instantiation of the Matrix class template. * * This class computes the square root of the upper quasi-triangular * matrix stored in the upper Hessenberg part of the matrix passed to * the constructor. * * \sa MatrixSquareRoot, MatrixSquareRootTriangular */ template class MatrixSquareRootQuasiTriangular { public: /** \brief Constructor. * * \param[in] A upper quasi-triangular matrix whose square root * is to be computed. * * The class stores a reference to \p A, so it should not be * changed (or destroyed) before compute() is called. */ MatrixSquareRootQuasiTriangular(const MatrixType& A) : m_A(A) { eigen_assert(A.rows() == A.cols()); } /** \brief Compute the matrix square root * * \param[out] result square root of \p A, as specified in the constructor. * * Only the upper Hessenberg part of \p result is updated, the * rest is not touched. See MatrixBase::sqrt() for details on * how this computation is implemented. 
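    *
    * In outline (hedged; this mirrors the private helpers declared below): the square
    * roots of the 1x1 and 2x2 diagonal blocks of the quasi-triangular matrix are computed
    * first, and the off-diagonal blocks are then filled by solving small Sylvester
    * equations; for two 1x1 blocks this reduces to
    * \f$ S_{ij} = (T_{ij} - \sum_{i<k<j} S_{ik}S_{kj})/(S_{ii}+S_{jj}) \f$.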
*/ template void compute(ResultType &result); private: typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; void computeDiagonalPartOfSqrt(MatrixType& sqrtT, const MatrixType& T); void computeOffDiagonalPartOfSqrt(MatrixType& sqrtT, const MatrixType& T); void compute2x2diagonalBlock(MatrixType& sqrtT, const MatrixType& T, typename MatrixType::Index i); void compute1x1offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T, typename MatrixType::Index i, typename MatrixType::Index j); void compute1x2offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T, typename MatrixType::Index i, typename MatrixType::Index j); void compute2x1offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T, typename MatrixType::Index i, typename MatrixType::Index j); void compute2x2offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T, typename MatrixType::Index i, typename MatrixType::Index j); template static void solveAuxiliaryEquation(SmallMatrixType& X, const SmallMatrixType& A, const SmallMatrixType& B, const SmallMatrixType& C); const MatrixType& m_A; }; template template void MatrixSquareRootQuasiTriangular::compute(ResultType &result) { result.resize(m_A.rows(), m_A.cols()); computeDiagonalPartOfSqrt(result, m_A); computeOffDiagonalPartOfSqrt(result, m_A); } // pre: T is quasi-upper-triangular and sqrtT is a zero matrix of the same size // post: the diagonal blocks of sqrtT are the square roots of the diagonal blocks of T template void MatrixSquareRootQuasiTriangular::computeDiagonalPartOfSqrt(MatrixType& sqrtT, const MatrixType& T) { using std::sqrt; const Index size = m_A.rows(); for (Index i = 0; i < size; i++) { if (i == size - 1 || T.coeff(i+1, i) == 0) { eigen_assert(T(i,i) >= 0); sqrtT.coeffRef(i,i) = sqrt(T.coeff(i,i)); } else { compute2x2diagonalBlock(sqrtT, T, i); ++i; } } } // pre: T is quasi-upper-triangular and diagonal blocks of sqrtT are square root of diagonal blocks of T. // post: sqrtT is the square root of T. template void MatrixSquareRootQuasiTriangular::computeOffDiagonalPartOfSqrt(MatrixType& sqrtT, const MatrixType& T) { const Index size = m_A.rows(); for (Index j = 1; j < size; j++) { if (T.coeff(j, j-1) != 0) // if T(j-1:j, j-1:j) is a 2-by-2 block continue; for (Index i = j-1; i >= 0; i--) { if (i > 0 && T.coeff(i, i-1) != 0) // if T(i-1:i, i-1:i) is a 2-by-2 block continue; bool iBlockIs2x2 = (i < size - 1) && (T.coeff(i+1, i) != 0); bool jBlockIs2x2 = (j < size - 1) && (T.coeff(j+1, j) != 0); if (iBlockIs2x2 && jBlockIs2x2) compute2x2offDiagonalBlock(sqrtT, T, i, j); else if (iBlockIs2x2 && !jBlockIs2x2) compute2x1offDiagonalBlock(sqrtT, T, i, j); else if (!iBlockIs2x2 && jBlockIs2x2) compute1x2offDiagonalBlock(sqrtT, T, i, j); else if (!iBlockIs2x2 && !jBlockIs2x2) compute1x1offDiagonalBlock(sqrtT, T, i, j); } } } // pre: T.block(i,i,2,2) has complex conjugate eigenvalues // post: sqrtT.block(i,i,2,2) is square root of T.block(i,i,2,2) template void MatrixSquareRootQuasiTriangular ::compute2x2diagonalBlock(MatrixType& sqrtT, const MatrixType& T, typename MatrixType::Index i) { // TODO: This case (2-by-2 blocks with complex conjugate eigenvalues) is probably hidden somewhere // in EigenSolver. If we expose it, we could call it directly from here. 
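  // Hedged summary of the step below: for a real 2x2 block with complex-conjugate
  // eigenvalues, the principal square root is obtained by diagonalising the block with
  // EigenSolver and taking the element-wise square roots of its eigenvalues.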
Matrix block = T.template block<2,2>(i,i); EigenSolver > es(block); sqrtT.template block<2,2>(i,i) = (es.eigenvectors() * es.eigenvalues().cwiseSqrt().asDiagonal() * es.eigenvectors().inverse()).real(); } // pre: block structure of T is such that (i,j) is a 1x1 block, // all blocks of sqrtT to left of and below (i,j) are correct // post: sqrtT(i,j) has the correct value template void MatrixSquareRootQuasiTriangular ::compute1x1offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T, typename MatrixType::Index i, typename MatrixType::Index j) { Scalar tmp = (sqrtT.row(i).segment(i+1,j-i-1) * sqrtT.col(j).segment(i+1,j-i-1)).value(); sqrtT.coeffRef(i,j) = (T.coeff(i,j) - tmp) / (sqrtT.coeff(i,i) + sqrtT.coeff(j,j)); } // similar to compute1x1offDiagonalBlock() template void MatrixSquareRootQuasiTriangular ::compute1x2offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T, typename MatrixType::Index i, typename MatrixType::Index j) { Matrix rhs = T.template block<1,2>(i,j); if (j-i > 1) rhs -= sqrtT.block(i, i+1, 1, j-i-1) * sqrtT.block(i+1, j, j-i-1, 2); Matrix A = sqrtT.coeff(i,i) * Matrix::Identity(); A += sqrtT.template block<2,2>(j,j).transpose(); sqrtT.template block<1,2>(i,j).transpose() = A.fullPivLu().solve(rhs.transpose()); } // similar to compute1x1offDiagonalBlock() template void MatrixSquareRootQuasiTriangular ::compute2x1offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T, typename MatrixType::Index i, typename MatrixType::Index j) { Matrix rhs = T.template block<2,1>(i,j); if (j-i > 2) rhs -= sqrtT.block(i, i+2, 2, j-i-2) * sqrtT.block(i+2, j, j-i-2, 1); Matrix A = sqrtT.coeff(j,j) * Matrix::Identity(); A += sqrtT.template block<2,2>(i,i); sqrtT.template block<2,1>(i,j) = A.fullPivLu().solve(rhs); } // similar to compute1x1offDiagonalBlock() template void MatrixSquareRootQuasiTriangular ::compute2x2offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T, typename MatrixType::Index i, typename MatrixType::Index j) { Matrix A = sqrtT.template block<2,2>(i,i); Matrix B = sqrtT.template block<2,2>(j,j); Matrix C = T.template block<2,2>(i,j); if (j-i > 2) C -= sqrtT.block(i, i+2, 2, j-i-2) * sqrtT.block(i+2, j, j-i-2, 2); Matrix X; solveAuxiliaryEquation(X, A, B, C); sqrtT.template block<2,2>(i,j) = X; } // solves the equation A X + X B = C where all matrices are 2-by-2 template template void MatrixSquareRootQuasiTriangular ::solveAuxiliaryEquation(SmallMatrixType& X, const SmallMatrixType& A, const SmallMatrixType& B, const SmallMatrixType& C) { EIGEN_STATIC_ASSERT((internal::is_same >::value), EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT); Matrix coeffMatrix = Matrix::Zero(); coeffMatrix.coeffRef(0,0) = A.coeff(0,0) + B.coeff(0,0); coeffMatrix.coeffRef(1,1) = A.coeff(0,0) + B.coeff(1,1); coeffMatrix.coeffRef(2,2) = A.coeff(1,1) + B.coeff(0,0); coeffMatrix.coeffRef(3,3) = A.coeff(1,1) + B.coeff(1,1); coeffMatrix.coeffRef(0,1) = B.coeff(1,0); coeffMatrix.coeffRef(0,2) = A.coeff(0,1); coeffMatrix.coeffRef(1,0) = B.coeff(0,1); coeffMatrix.coeffRef(1,3) = A.coeff(0,1); coeffMatrix.coeffRef(2,0) = A.coeff(1,0); coeffMatrix.coeffRef(2,3) = B.coeff(1,0); coeffMatrix.coeffRef(3,1) = A.coeff(1,0); coeffMatrix.coeffRef(3,2) = B.coeff(0,1); Matrix rhs; rhs.coeffRef(0) = C.coeff(0,0); rhs.coeffRef(1) = C.coeff(0,1); rhs.coeffRef(2) = C.coeff(1,0); rhs.coeffRef(3) = C.coeff(1,1); Matrix result; result = coeffMatrix.fullPivLu().solve(rhs); X.coeffRef(0,0) = result.coeff(0); X.coeffRef(0,1) = result.coeff(1); X.coeffRef(1,0) = result.coeff(2); X.coeffRef(1,1) = result.coeff(3); } /** 
\ingroup MatrixFunctions_Module * \brief Class for computing matrix square roots of upper triangular matrices. * \tparam MatrixType type of the argument of the matrix square root, * expected to be an instantiation of the Matrix class template. * * This class computes the square root of the upper triangular matrix * stored in the upper triangular part (including the diagonal) of * the matrix passed to the constructor. * * \sa MatrixSquareRoot, MatrixSquareRootQuasiTriangular */ template class MatrixSquareRootTriangular { public: MatrixSquareRootTriangular(const MatrixType& A) : m_A(A) { eigen_assert(A.rows() == A.cols()); } /** \brief Compute the matrix square root * * \param[out] result square root of \p A, as specified in the constructor. * * Only the upper triangular part (including the diagonal) of * \p result is updated, the rest is not touched. See * MatrixBase::sqrt() for details on how this computation is * implemented. */ template void compute(ResultType &result); private: const MatrixType& m_A; }; template template void MatrixSquareRootTriangular::compute(ResultType &result) { using std::sqrt; // Compute square root of m_A and store it in upper triangular part of result // This uses that the square root of triangular matrices can be computed directly. result.resize(m_A.rows(), m_A.cols()); typedef typename MatrixType::Index Index; for (Index i = 0; i < m_A.rows(); i++) { result.coeffRef(i,i) = sqrt(m_A.coeff(i,i)); } for (Index j = 1; j < m_A.cols(); j++) { for (Index i = j-1; i >= 0; i--) { typedef typename MatrixType::Scalar Scalar; // if i = j-1, then segment has length 0 so tmp = 0 Scalar tmp = (result.row(i).segment(i+1,j-i-1) * result.col(j).segment(i+1,j-i-1)).value(); // denominator may be zero if original matrix is singular result.coeffRef(i,j) = (m_A.coeff(i,j) - tmp) / (result.coeff(i,i) + result.coeff(j,j)); } } } /** \ingroup MatrixFunctions_Module * \brief Class for computing matrix square roots of general matrices. * \tparam MatrixType type of the argument of the matrix square root, * expected to be an instantiation of the Matrix class template. * * \sa MatrixSquareRootTriangular, MatrixSquareRootQuasiTriangular, MatrixBase::sqrt() */ template ::Scalar>::IsComplex> class MatrixSquareRoot { public: /** \brief Constructor. * * \param[in] A matrix whose square root is to be computed. * * The class stores a reference to \p A, so it should not be * changed (or destroyed) before compute() is called. */ MatrixSquareRoot(const MatrixType& A); /** \brief Compute the matrix square root * * \param[out] result square root of \p A, as specified in the constructor. * * See MatrixBase::sqrt() for details on how this computation is * implemented. 
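    *
    * A minimal usage sketch (illustrative; assumes the MatrixFunctions module so that
    * MatrixBase::sqrt(), defined at the end of this file, is available):
    * \code
    * Eigen::MatrixXd A(2,2);
    * A << 4, 0,
    *      0, 9;
    * Eigen::MatrixXd S = A.sqrt();   // S is diag(2, 3) and S*S reproduces A
    * \endcode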
*/ template void compute(ResultType &result); }; // ********** Partial specialization for real matrices ********** template class MatrixSquareRoot { public: MatrixSquareRoot(const MatrixType& A) : m_A(A) { eigen_assert(A.rows() == A.cols()); } template void compute(ResultType &result) { // Compute Schur decomposition of m_A const RealSchur schurOfA(m_A); const MatrixType& T = schurOfA.matrixT(); const MatrixType& U = schurOfA.matrixU(); // Compute square root of T MatrixType sqrtT = MatrixType::Zero(m_A.rows(), m_A.cols()); MatrixSquareRootQuasiTriangular(T).compute(sqrtT); // Compute square root of m_A result = U * sqrtT * U.adjoint(); } private: const MatrixType& m_A; }; // ********** Partial specialization for complex matrices ********** template class MatrixSquareRoot { public: MatrixSquareRoot(const MatrixType& A) : m_A(A) { eigen_assert(A.rows() == A.cols()); } template void compute(ResultType &result) { // Compute Schur decomposition of m_A const ComplexSchur schurOfA(m_A); const MatrixType& T = schurOfA.matrixT(); const MatrixType& U = schurOfA.matrixU(); // Compute square root of T MatrixType sqrtT; MatrixSquareRootTriangular(T).compute(sqrtT); // Compute square root of m_A result = U * (sqrtT.template triangularView() * U.adjoint()); } private: const MatrixType& m_A; }; /** \ingroup MatrixFunctions_Module * * \brief Proxy for the matrix square root of some matrix (expression). * * \tparam Derived Type of the argument to the matrix square root. * * This class holds the argument to the matrix square root until it * is assigned or evaluated for some other reason (so the argument * should not be changed in the meantime). It is the return type of * MatrixBase::sqrt() and most of the time this is the only way it is * used. */ template class MatrixSquareRootReturnValue : public ReturnByValue > { typedef typename Derived::Index Index; public: /** \brief Constructor. * * \param[in] src %Matrix (expression) forming the argument of the * matrix square root. */ MatrixSquareRootReturnValue(const Derived& src) : m_src(src) { } /** \brief Compute the matrix square root. * * \param[out] result the matrix square root of \p src in the * constructor. */ template inline void evalTo(ResultType& result) const { const typename Derived::PlainObject srcEvaluated = m_src.eval(); MatrixSquareRoot me(srcEvaluated); me.compute(result); } Index rows() const { return m_src.rows(); } Index cols() const { return m_src.cols(); } protected: const Derived& m_src; private: MatrixSquareRootReturnValue& operator=(const MatrixSquareRootReturnValue&); }; namespace internal { template struct traits > { typedef typename Derived::PlainObject ReturnType; }; } template const MatrixSquareRootReturnValue MatrixBase::sqrt() const { eigen_assert(rows() == cols()); return MatrixSquareRootReturnValue(derived()); } } // end namespace Eigen #endif // EIGEN_MATRIX_FUNCTION RcppEigen/inst/include/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h0000644000176200001440000005477112253717461027403 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2011 Jitse Niesen // Copyright (C) 2011 Chen-Pang He // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
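// Illustrative usage sketch (hedged; assumes the MatrixFunctions module exposes
// MatrixBase::log() built on the atomic routine defined in this file):
//
//   Eigen::MatrixXd A(2,2);
//   A << 2, 1,
//        0, 3;
//   Eigen::MatrixXd logA = A.log();   // logA.exp() should reproduce A up to rounding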
#ifndef EIGEN_MATRIX_LOGARITHM #define EIGEN_MATRIX_LOGARITHM #ifndef M_PI #define M_PI 3.141592653589793238462643383279503L #endif namespace Eigen { /** \ingroup MatrixFunctions_Module * \class MatrixLogarithmAtomic * \brief Helper class for computing matrix logarithm of atomic matrices. * * \internal * Here, an atomic matrix is a triangular matrix whose diagonal * entries are close to each other. * * \sa class MatrixFunctionAtomic, MatrixBase::log() */ template class MatrixLogarithmAtomic { public: typedef typename MatrixType::Scalar Scalar; // typedef typename MatrixType::Index Index; typedef typename NumTraits::Real RealScalar; // typedef typename internal::stem_function::type StemFunction; // typedef Matrix VectorType; /** \brief Constructor. */ MatrixLogarithmAtomic() { } /** \brief Compute matrix logarithm of atomic matrix * \param[in] A argument of matrix logarithm, should be upper triangular and atomic * \returns The logarithm of \p A. */ MatrixType compute(const MatrixType& A); private: void compute2x2(const MatrixType& A, MatrixType& result); void computeBig(const MatrixType& A, MatrixType& result); int getPadeDegree(float normTminusI); int getPadeDegree(double normTminusI); int getPadeDegree(long double normTminusI); void computePade(MatrixType& result, const MatrixType& T, int degree); void computePade3(MatrixType& result, const MatrixType& T); void computePade4(MatrixType& result, const MatrixType& T); void computePade5(MatrixType& result, const MatrixType& T); void computePade6(MatrixType& result, const MatrixType& T); void computePade7(MatrixType& result, const MatrixType& T); void computePade8(MatrixType& result, const MatrixType& T); void computePade9(MatrixType& result, const MatrixType& T); void computePade10(MatrixType& result, const MatrixType& T); void computePade11(MatrixType& result, const MatrixType& T); static const int minPadeDegree = 3; static const int maxPadeDegree = std::numeric_limits::digits<= 24? 5: // single precision std::numeric_limits::digits<= 53? 7: // double precision std::numeric_limits::digits<= 64? 8: // extended precision std::numeric_limits::digits<=106? 10: // double-double 11; // quadruple precision // Prevent copying MatrixLogarithmAtomic(const MatrixLogarithmAtomic&); MatrixLogarithmAtomic& operator=(const MatrixLogarithmAtomic&); }; /** \brief Compute logarithm of triangular matrix with clustered eigenvalues. */ template MatrixType MatrixLogarithmAtomic::compute(const MatrixType& A) { using std::log; MatrixType result(A.rows(), A.rows()); if (A.rows() == 1) result(0,0) = log(A(0,0)); else if (A.rows() == 2) compute2x2(A, result); else computeBig(A, result); return result; } /** \brief Compute logarithm of 2x2 triangular matrix. 
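 *
 * Hedged reading of the branches below: for an upper-triangular block with diagonal
 * entries \f$ a, d \f$ and off-diagonal entry \f$ b \f$, the logarithm has off-diagonal
 * entry \f$ b(\log d - \log a)/(d - a) \f$, with the limit \f$ b/a \f$ when \f$ a = d \f$,
 * and an atanh2-based formula with an unwinding-number correction when \f$ a \approx d \f$.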
*/ template void MatrixLogarithmAtomic::compute2x2(const MatrixType& A, MatrixType& result) { using std::abs; using std::ceil; using std::imag; using std::log; Scalar logA00 = log(A(0,0)); Scalar logA11 = log(A(1,1)); result(0,0) = logA00; result(1,0) = Scalar(0); result(1,1) = logA11; if (A(0,0) == A(1,1)) { result(0,1) = A(0,1) / A(0,0); } else if ((abs(A(0,0)) < 0.5*abs(A(1,1))) || (abs(A(0,0)) > 2*abs(A(1,1)))) { result(0,1) = A(0,1) * (logA11 - logA00) / (A(1,1) - A(0,0)); } else { // computation in previous branch is inaccurate if A(1,1) \approx A(0,0) int unwindingNumber = static_cast(ceil((imag(logA11 - logA00) - M_PI) / (2*M_PI))); Scalar y = A(1,1) - A(0,0), x = A(1,1) + A(0,0); result(0,1) = A(0,1) * (Scalar(2) * numext::atanh2(y,x) + Scalar(0,2*M_PI*unwindingNumber)) / y; } } /** \brief Compute logarithm of triangular matrices with size > 2. * \details This uses a inverse scale-and-square algorithm. */ template void MatrixLogarithmAtomic::computeBig(const MatrixType& A, MatrixType& result) { using std::pow; int numberOfSquareRoots = 0; int numberOfExtraSquareRoots = 0; int degree; MatrixType T = A, sqrtT; const RealScalar maxNormForPade = maxPadeDegree<= 5? 5.3149729967117310e-1: // single precision maxPadeDegree<= 7? 2.6429608311114350e-1: // double precision maxPadeDegree<= 8? 2.32777776523703892094e-1L: // extended precision maxPadeDegree<=10? 1.05026503471351080481093652651105e-1L: // double-double 1.1880960220216759245467951592883642e-1L; // quadruple precision while (true) { RealScalar normTminusI = (T - MatrixType::Identity(T.rows(), T.rows())).cwiseAbs().colwise().sum().maxCoeff(); if (normTminusI < maxNormForPade) { degree = getPadeDegree(normTminusI); int degree2 = getPadeDegree(normTminusI / RealScalar(2)); if ((degree - degree2 <= 1) || (numberOfExtraSquareRoots == 1)) break; ++numberOfExtraSquareRoots; } MatrixSquareRootTriangular(T).compute(sqrtT); T = sqrtT.template triangularView(); ++numberOfSquareRoots; } computePade(result, T, degree); result *= pow(RealScalar(2), numberOfSquareRoots); } /* \brief Get suitable degree for Pade approximation. (specialized for RealScalar = float) */ template int MatrixLogarithmAtomic::getPadeDegree(float normTminusI) { const float maxNormForPade[] = { 2.5111573934555054e-1 /* degree = 3 */ , 4.0535837411880493e-1, 5.3149729967117310e-1 }; int degree = 3; for (; degree <= maxPadeDegree; ++degree) if (normTminusI <= maxNormForPade[degree - minPadeDegree]) break; return degree; } /* \brief Get suitable degree for Pade approximation. (specialized for RealScalar = double) */ template int MatrixLogarithmAtomic::getPadeDegree(double normTminusI) { const double maxNormForPade[] = { 1.6206284795015624e-2 /* degree = 3 */ , 5.3873532631381171e-2, 1.1352802267628681e-1, 1.8662860613541288e-1, 2.642960831111435e-1 }; int degree = 3; for (; degree <= maxPadeDegree; ++degree) if (normTminusI <= maxNormForPade[degree - minPadeDegree]) break; return degree; } /* \brief Get suitable degree for Pade approximation. 
(specialized for RealScalar = long double) */ template int MatrixLogarithmAtomic::getPadeDegree(long double normTminusI) { #if LDBL_MANT_DIG == 53 // double precision const long double maxNormForPade[] = { 1.6206284795015624e-2L /* degree = 3 */ , 5.3873532631381171e-2L, 1.1352802267628681e-1L, 1.8662860613541288e-1L, 2.642960831111435e-1L }; #elif LDBL_MANT_DIG <= 64 // extended precision const long double maxNormForPade[] = { 5.48256690357782863103e-3L /* degree = 3 */, 2.34559162387971167321e-2L, 5.84603923897347449857e-2L, 1.08486423756725170223e-1L, 1.68385767881294446649e-1L, 2.32777776523703892094e-1L }; #elif LDBL_MANT_DIG <= 106 // double-double const long double maxNormForPade[] = { 8.58970550342939562202529664318890e-5L /* degree = 3 */, 9.34074328446359654039446552677759e-4L, 4.26117194647672175773064114582860e-3L, 1.21546224740281848743149666560464e-2L, 2.61100544998339436713088248557444e-2L, 4.66170074627052749243018566390567e-2L, 7.32585144444135027565872014932387e-2L, 1.05026503471351080481093652651105e-1L }; #else // quadruple precision const long double maxNormForPade[] = { 4.7419931187193005048501568167858103e-5L /* degree = 3 */, 5.8853168473544560470387769480192666e-4L, 2.9216120366601315391789493628113520e-3L, 8.8415758124319434347116734705174308e-3L, 1.9850836029449446668518049562565291e-2L, 3.6688019729653446926585242192447447e-2L, 5.9290962294020186998954055264528393e-2L, 8.6998436081634343903250580992127677e-2L, 1.1880960220216759245467951592883642e-1L }; #endif int degree = 3; for (; degree <= maxPadeDegree; ++degree) if (normTminusI <= maxNormForPade[degree - minPadeDegree]) break; return degree; } /* \brief Compute Pade approximation to matrix logarithm */ template void MatrixLogarithmAtomic::computePade(MatrixType& result, const MatrixType& T, int degree) { switch (degree) { case 3: computePade3(result, T); break; case 4: computePade4(result, T); break; case 5: computePade5(result, T); break; case 6: computePade6(result, T); break; case 7: computePade7(result, T); break; case 8: computePade8(result, T); break; case 9: computePade9(result, T); break; case 10: computePade10(result, T); break; case 11: computePade11(result, T); break; default: assert(false); // should never happen } } template void MatrixLogarithmAtomic::computePade3(MatrixType& result, const MatrixType& T) { const int degree = 3; const RealScalar nodes[] = { 0.1127016653792583114820734600217600L, 0.5000000000000000000000000000000000L, 0.8872983346207416885179265399782400L }; const RealScalar weights[] = { 0.2777777777777777777777777777777778L, 0.4444444444444444444444444444444444L, 0.2777777777777777777777777777777778L }; eigen_assert(degree <= maxPadeDegree); MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows()); result.setZero(T.rows(), T.rows()); for (int k = 0; k < degree; ++k) result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI) .template triangularView().solve(TminusI); } template void MatrixLogarithmAtomic::computePade4(MatrixType& result, const MatrixType& T) { const int degree = 4; const RealScalar nodes[] = { 0.0694318442029737123880267555535953L, 0.3300094782075718675986671204483777L, 0.6699905217924281324013328795516223L, 0.9305681557970262876119732444464048L }; const RealScalar weights[] = { 0.1739274225687269286865319746109997L, 0.3260725774312730713134680253890003L, 0.3260725774312730713134680253890003L, 0.1739274225687269286865319746109997L }; eigen_assert(degree <= maxPadeDegree); MatrixType TminusI = T - 
MatrixType::Identity(T.rows(), T.rows()); result.setZero(T.rows(), T.rows()); for (int k = 0; k < degree; ++k) result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI) .template triangularView().solve(TminusI); } template void MatrixLogarithmAtomic::computePade5(MatrixType& result, const MatrixType& T) { const int degree = 5; const RealScalar nodes[] = { 0.0469100770306680036011865608503035L, 0.2307653449471584544818427896498956L, 0.5000000000000000000000000000000000L, 0.7692346550528415455181572103501044L, 0.9530899229693319963988134391496965L }; const RealScalar weights[] = { 0.1184634425280945437571320203599587L, 0.2393143352496832340206457574178191L, 0.2844444444444444444444444444444444L, 0.2393143352496832340206457574178191L, 0.1184634425280945437571320203599587L }; eigen_assert(degree <= maxPadeDegree); MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows()); result.setZero(T.rows(), T.rows()); for (int k = 0; k < degree; ++k) result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI) .template triangularView().solve(TminusI); } template void MatrixLogarithmAtomic::computePade6(MatrixType& result, const MatrixType& T) { const int degree = 6; const RealScalar nodes[] = { 0.0337652428984239860938492227530027L, 0.1693953067668677431693002024900473L, 0.3806904069584015456847491391596440L, 0.6193095930415984543152508608403560L, 0.8306046932331322568306997975099527L, 0.9662347571015760139061507772469973L }; const RealScalar weights[] = { 0.0856622461895851725201480710863665L, 0.1803807865240693037849167569188581L, 0.2339569672863455236949351719947755L, 0.2339569672863455236949351719947755L, 0.1803807865240693037849167569188581L, 0.0856622461895851725201480710863665L }; eigen_assert(degree <= maxPadeDegree); MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows()); result.setZero(T.rows(), T.rows()); for (int k = 0; k < degree; ++k) result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI) .template triangularView().solve(TminusI); } template void MatrixLogarithmAtomic::computePade7(MatrixType& result, const MatrixType& T) { const int degree = 7; const RealScalar nodes[] = { 0.0254460438286207377369051579760744L, 0.1292344072003027800680676133596058L, 0.2970774243113014165466967939615193L, 0.5000000000000000000000000000000000L, 0.7029225756886985834533032060384807L, 0.8707655927996972199319323866403942L, 0.9745539561713792622630948420239256L }; const RealScalar weights[] = { 0.0647424830844348466353057163395410L, 0.1398526957446383339507338857118898L, 0.1909150252525594724751848877444876L, 0.2089795918367346938775510204081633L, 0.1909150252525594724751848877444876L, 0.1398526957446383339507338857118898L, 0.0647424830844348466353057163395410L }; eigen_assert(degree <= maxPadeDegree); MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows()); result.setZero(T.rows(), T.rows()); for (int k = 0; k < degree; ++k) result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI) .template triangularView().solve(TminusI); } template void MatrixLogarithmAtomic::computePade8(MatrixType& result, const MatrixType& T) { const int degree = 8; const RealScalar nodes[] = { 0.0198550717512318841582195657152635L, 0.1016667612931866302042230317620848L, 0.2372337950418355070911304754053768L, 0.4082826787521750975302619288199080L, 0.5917173212478249024697380711800920L, 0.7627662049581644929088695245946232L, 0.8983332387068133697957769682379152L, 
0.9801449282487681158417804342847365L }; const RealScalar weights[] = { 0.0506142681451881295762656771549811L, 0.1111905172266872352721779972131204L, 0.1568533229389436436689811009933007L, 0.1813418916891809914825752246385978L, 0.1813418916891809914825752246385978L, 0.1568533229389436436689811009933007L, 0.1111905172266872352721779972131204L, 0.0506142681451881295762656771549811L }; eigen_assert(degree <= maxPadeDegree); MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows()); result.setZero(T.rows(), T.rows()); for (int k = 0; k < degree; ++k) result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI) .template triangularView().solve(TminusI); } template void MatrixLogarithmAtomic::computePade9(MatrixType& result, const MatrixType& T) { const int degree = 9; const RealScalar nodes[] = { 0.0159198802461869550822118985481636L, 0.0819844463366821028502851059651326L, 0.1933142836497048013456489803292629L, 0.3378732882980955354807309926783317L, 0.5000000000000000000000000000000000L, 0.6621267117019044645192690073216683L, 0.8066857163502951986543510196707371L, 0.9180155536633178971497148940348674L, 0.9840801197538130449177881014518364L }; const RealScalar weights[] = { 0.0406371941807872059859460790552618L, 0.0903240803474287020292360156214564L, 0.1303053482014677311593714347093164L, 0.1561735385200014200343152032922218L, 0.1651196775006298815822625346434870L, 0.1561735385200014200343152032922218L, 0.1303053482014677311593714347093164L, 0.0903240803474287020292360156214564L, 0.0406371941807872059859460790552618L }; eigen_assert(degree <= maxPadeDegree); MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows()); result.setZero(T.rows(), T.rows()); for (int k = 0; k < degree; ++k) result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI) .template triangularView().solve(TminusI); } template void MatrixLogarithmAtomic::computePade10(MatrixType& result, const MatrixType& T) { const int degree = 10; const RealScalar nodes[] = { 0.0130467357414141399610179939577740L, 0.0674683166555077446339516557882535L, 0.1602952158504877968828363174425632L, 0.2833023029353764046003670284171079L, 0.4255628305091843945575869994351400L, 0.5744371694908156054424130005648600L, 0.7166976970646235953996329715828921L, 0.8397047841495122031171636825574368L, 0.9325316833444922553660483442117465L, 0.9869532642585858600389820060422260L }; const RealScalar weights[] = { 0.0333356721543440687967844049466659L, 0.0747256745752902965728881698288487L, 0.1095431812579910219977674671140816L, 0.1346333596549981775456134607847347L, 0.1477621123573764350869464973256692L, 0.1477621123573764350869464973256692L, 0.1346333596549981775456134607847347L, 0.1095431812579910219977674671140816L, 0.0747256745752902965728881698288487L, 0.0333356721543440687967844049466659L }; eigen_assert(degree <= maxPadeDegree); MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows()); result.setZero(T.rows(), T.rows()); for (int k = 0; k < degree; ++k) result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI) .template triangularView().solve(TminusI); } template void MatrixLogarithmAtomic::computePade11(MatrixType& result, const MatrixType& T) { const int degree = 11; const RealScalar nodes[] = { 0.0108856709269715035980309994385713L, 0.0564687001159523504624211153480364L, 0.1349239972129753379532918739844233L, 0.2404519353965940920371371652706952L, 0.3652284220238275138342340072995692L, 0.5000000000000000000000000000000000L, 
0.6347715779761724861657659927004308L, 0.7595480646034059079628628347293048L, 0.8650760027870246620467081260155767L, 0.9435312998840476495375788846519636L, 0.9891143290730284964019690005614287L }; const RealScalar weights[] = { 0.0278342835580868332413768602212743L, 0.0627901847324523123173471496119701L, 0.0931451054638671257130488207158280L, 0.1165968822959952399592618524215876L, 0.1314022722551233310903444349452546L, 0.1364625433889503153572417641681711L, 0.1314022722551233310903444349452546L, 0.1165968822959952399592618524215876L, 0.0931451054638671257130488207158280L, 0.0627901847324523123173471496119701L, 0.0278342835580868332413768602212743L }; eigen_assert(degree <= maxPadeDegree); MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows()); result.setZero(T.rows(), T.rows()); for (int k = 0; k < degree; ++k) result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI) .template triangularView().solve(TminusI); } /** \ingroup MatrixFunctions_Module * * \brief Proxy for the matrix logarithm of some matrix (expression). * * \tparam Derived Type of the argument to the matrix function. * * This class holds the argument to the matrix function until it is * assigned or evaluated for some other reason (so the argument * should not be changed in the meantime). It is the return type of * MatrixBase::log() and most of the time this is the only way it * is used. */ template class MatrixLogarithmReturnValue : public ReturnByValue > { public: typedef typename Derived::Scalar Scalar; typedef typename Derived::Index Index; /** \brief Constructor. * * \param[in] A %Matrix (expression) forming the argument of the matrix logarithm. */ MatrixLogarithmReturnValue(const Derived& A) : m_A(A) { } /** \brief Compute the matrix logarithm. * * \param[out] result Logarithm of \p A, where \A is as specified in the constructor. */ template inline void evalTo(ResultType& result) const { typedef typename Derived::PlainObject PlainObject; typedef internal::traits Traits; static const int RowsAtCompileTime = Traits::RowsAtCompileTime; static const int ColsAtCompileTime = Traits::ColsAtCompileTime; static const int Options = PlainObject::Options; typedef std::complex::Real> ComplexScalar; typedef Matrix DynMatrixType; typedef MatrixLogarithmAtomic AtomicType; AtomicType atomic; const PlainObject Aevaluated = m_A.eval(); MatrixFunction mf(Aevaluated, atomic); mf.compute(result); } Index rows() const { return m_A.rows(); } Index cols() const { return m_A.cols(); } private: typename internal::nested::type m_A; MatrixLogarithmReturnValue& operator=(const MatrixLogarithmReturnValue&); }; namespace internal { template struct traits > { typedef typename Derived::PlainObject ReturnType; }; } /********** MatrixBase method **********/ template const MatrixLogarithmReturnValue MatrixBase::log() const { eigen_assert(rows() == cols()); return MatrixLogarithmReturnValue(derived()); } } // end namespace Eigen #endif // EIGEN_MATRIX_LOGARITHM RcppEigen/inst/include/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h0000644000176200001440000004217312446006401026527 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012, 2013 Chen-Pang He // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
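// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the upstream Eigen source):
// the header that follows provides two entry points for real matrix powers:
// the one-shot convenience call MatrixBase::pow(p), and the MatrixPower<>
// class, which keeps the Schur decomposition of the base so that several
// powers of the same matrix can be formed cheaply (see the class documentation
// below). The include path and the sample matrix are assumptions made for the
// example only, and the block is disabled with "#if 0" so it cannot interfere
// with the header itself.
#if 0
#include <unsupported/Eigen/MatrixFunctions>
#include <iostream>

int main()
{
  Eigen::Matrix2d A;
  A << 2.0, 1.0,
       0.0, 1.0;                               // nonsingular, so fractional powers are defined

  Eigen::Matrix2d B = A.pow(0.5);              // one-shot: proxy evaluated on assignment
  std::cout << "A^0.5 =\n" << B << "\n\n";

  Eigen::MatrixPower<Eigen::Matrix2d> Apow(A); // reuses one Schur decomposition ...
  for (double p = 0.25; p <= 1.0; p += 0.25) { // ... across several exponents
    Eigen::Matrix2d Ap = Apow(p);
    std::cout << "A^" << p << " =\n" << Ap << "\n\n";
  }
  return 0;
}
#endif
// ---------------------------------------------------------------------------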
#ifndef EIGEN_MATRIX_POWER #define EIGEN_MATRIX_POWER namespace Eigen { template class MatrixPower; template class MatrixPowerRetval : public ReturnByValue< MatrixPowerRetval > { public: typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::Index Index; MatrixPowerRetval(MatrixPower& pow, RealScalar p) : m_pow(pow), m_p(p) { } template inline void evalTo(ResultType& res) const { m_pow.compute(res, m_p); } Index rows() const { return m_pow.rows(); } Index cols() const { return m_pow.cols(); } private: MatrixPower& m_pow; const RealScalar m_p; MatrixPowerRetval& operator=(const MatrixPowerRetval&); }; template class MatrixPowerAtomic { private: enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime }; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef std::complex ComplexScalar; typedef typename MatrixType::Index Index; typedef Array ArrayType; const MatrixType& m_A; RealScalar m_p; void computePade(int degree, const MatrixType& IminusT, MatrixType& res) const; void compute2x2(MatrixType& res, RealScalar p) const; void computeBig(MatrixType& res) const; static int getPadeDegree(float normIminusT); static int getPadeDegree(double normIminusT); static int getPadeDegree(long double normIminusT); static ComplexScalar computeSuperDiag(const ComplexScalar&, const ComplexScalar&, RealScalar p); static RealScalar computeSuperDiag(RealScalar, RealScalar, RealScalar p); public: MatrixPowerAtomic(const MatrixType& T, RealScalar p); void compute(MatrixType& res) const; }; template MatrixPowerAtomic::MatrixPowerAtomic(const MatrixType& T, RealScalar p) : m_A(T), m_p(p) { eigen_assert(T.rows() == T.cols()); } template void MatrixPowerAtomic::compute(MatrixType& res) const { res.resizeLike(m_A); switch (m_A.rows()) { case 0: break; case 1: res(0,0) = std::pow(m_A(0,0), m_p); break; case 2: compute2x2(res, m_p); break; default: computeBig(res); } } template void MatrixPowerAtomic::computePade(int degree, const MatrixType& IminusT, MatrixType& res) const { int i = degree<<1; res = (m_p-degree) / ((i-1)<<1) * IminusT; for (--i; i; --i) { res = (MatrixType::Identity(IminusT.rows(), IminusT.cols()) + res).template triangularView() .solve((i==1 ? -m_p : i&1 ? (-m_p-(i>>1))/(i<<1) : (m_p-(i>>1))/((i-1)<<1)) * IminusT).eval(); } res += MatrixType::Identity(IminusT.rows(), IminusT.cols()); } // This function assumes that res has the correct size (see bug 614) template void MatrixPowerAtomic::compute2x2(MatrixType& res, RealScalar p) const { using std::abs; using std::pow; res.coeffRef(0,0) = pow(m_A.coeff(0,0), p); for (Index i=1; i < m_A.cols(); ++i) { res.coeffRef(i,i) = pow(m_A.coeff(i,i), p); if (m_A.coeff(i-1,i-1) == m_A.coeff(i,i)) res.coeffRef(i-1,i) = p * pow(m_A.coeff(i,i), p-1); else if (2*abs(m_A.coeff(i-1,i-1)) < abs(m_A.coeff(i,i)) || 2*abs(m_A.coeff(i,i)) < abs(m_A.coeff(i-1,i-1))) res.coeffRef(i-1,i) = (res.coeff(i,i)-res.coeff(i-1,i-1)) / (m_A.coeff(i,i)-m_A.coeff(i-1,i-1)); else res.coeffRef(i-1,i) = computeSuperDiag(m_A.coeff(i,i), m_A.coeff(i-1,i-1), p); res.coeffRef(i-1,i) *= m_A.coeff(i-1,i); } } template void MatrixPowerAtomic::computeBig(MatrixType& res) const { const int digits = std::numeric_limits::digits; const RealScalar maxNormForPade = digits <= 24? 4.3386528e-1f: // sigle precision digits <= 53? 2.789358995219730e-1: // double precision digits <= 64? 2.4471944416607995472e-1L: // extended precision digits <= 106? 
1.1016843812851143391275867258512e-1L: // double-double 9.134603732914548552537150753385375e-2L; // quadruple precision MatrixType IminusT, sqrtT, T = m_A.template triangularView(); RealScalar normIminusT; int degree, degree2, numberOfSquareRoots = 0; bool hasExtraSquareRoot = false; /* FIXME * For singular T, norm(I - T) >= 1 but maxNormForPade < 1, leads to infinite * loop. We should move 0 eigenvalues to bottom right corner. We need not * worry about tiny values (e.g. 1e-300) because they will reach 1 if * repetitively sqrt'ed. * * If the 0 eigenvalues are semisimple, they can form a 0 matrix at the * bottom right corner. * * [ T A ]^p [ T^p (T^-1 T^p A) ] * [ ] = [ ] * [ 0 0 ] [ 0 0 ] */ for (Index i=0; i < m_A.cols(); ++i) eigen_assert(m_A(i,i) != RealScalar(0)); while (true) { IminusT = MatrixType::Identity(m_A.rows(), m_A.cols()) - T; normIminusT = IminusT.cwiseAbs().colwise().sum().maxCoeff(); if (normIminusT < maxNormForPade) { degree = getPadeDegree(normIminusT); degree2 = getPadeDegree(normIminusT/2); if (degree - degree2 <= 1 || hasExtraSquareRoot) break; hasExtraSquareRoot = true; } MatrixSquareRootTriangular(T).compute(sqrtT); T = sqrtT.template triangularView(); ++numberOfSquareRoots; } computePade(degree, IminusT, res); for (; numberOfSquareRoots; --numberOfSquareRoots) { compute2x2(res, std::ldexp(m_p, -numberOfSquareRoots)); res = res.template triangularView() * res; } compute2x2(res, m_p); } template inline int MatrixPowerAtomic::getPadeDegree(float normIminusT) { const float maxNormForPade[] = { 2.8064004e-1f /* degree = 3 */ , 4.3386528e-1f }; int degree = 3; for (; degree <= 4; ++degree) if (normIminusT <= maxNormForPade[degree - 3]) break; return degree; } template inline int MatrixPowerAtomic::getPadeDegree(double normIminusT) { const double maxNormForPade[] = { 1.884160592658218e-2 /* degree = 3 */ , 6.038881904059573e-2, 1.239917516308172e-1, 1.999045567181744e-1, 2.789358995219730e-1 }; int degree = 3; for (; degree <= 7; ++degree) if (normIminusT <= maxNormForPade[degree - 3]) break; return degree; } template inline int MatrixPowerAtomic::getPadeDegree(long double normIminusT) { #if LDBL_MANT_DIG == 53 const int maxPadeDegree = 7; const double maxNormForPade[] = { 1.884160592658218e-2L /* degree = 3 */ , 6.038881904059573e-2L, 1.239917516308172e-1L, 1.999045567181744e-1L, 2.789358995219730e-1L }; #elif LDBL_MANT_DIG <= 64 const int maxPadeDegree = 8; const double maxNormForPade[] = { 6.3854693117491799460e-3L /* degree = 3 */ , 2.6394893435456973676e-2L, 6.4216043030404063729e-2L, 1.1701165502926694307e-1L, 1.7904284231268670284e-1L, 2.4471944416607995472e-1L }; #elif LDBL_MANT_DIG <= 106 const int maxPadeDegree = 10; const double maxNormForPade[] = { 1.0007161601787493236741409687186e-4L /* degree = 3 */ , 1.0007161601787493236741409687186e-3L, 4.7069769360887572939882574746264e-3L, 1.3220386624169159689406653101695e-2L, 2.8063482381631737920612944054906e-2L, 4.9625993951953473052385361085058e-2L, 7.7367040706027886224557538328171e-2L, 1.1016843812851143391275867258512e-1L }; #else const int maxPadeDegree = 10; const double maxNormForPade[] = { 5.524506147036624377378713555116378e-5L /* degree = 3 */ , 6.640600568157479679823602193345995e-4L, 3.227716520106894279249709728084626e-3L, 9.619593944683432960546978734646284e-3L, 2.134595382433742403911124458161147e-2L, 3.908166513900489428442993794761185e-2L, 6.266780814639442865832535460550138e-2L, 9.134603732914548552537150753385375e-2L }; #endif int degree = 3; for (; degree <= maxPadeDegree; ++degree) if 
(normIminusT <= maxNormForPade[degree - 3]) break; return degree; } template inline typename MatrixPowerAtomic::ComplexScalar MatrixPowerAtomic::computeSuperDiag(const ComplexScalar& curr, const ComplexScalar& prev, RealScalar p) { ComplexScalar logCurr = std::log(curr); ComplexScalar logPrev = std::log(prev); int unwindingNumber = std::ceil((numext::imag(logCurr - logPrev) - M_PI) / (2*M_PI)); ComplexScalar w = numext::atanh2(curr - prev, curr + prev) + ComplexScalar(0, M_PI*unwindingNumber); return RealScalar(2) * std::exp(RealScalar(0.5) * p * (logCurr + logPrev)) * std::sinh(p * w) / (curr - prev); } template inline typename MatrixPowerAtomic::RealScalar MatrixPowerAtomic::computeSuperDiag(RealScalar curr, RealScalar prev, RealScalar p) { RealScalar w = numext::atanh2(curr - prev, curr + prev); return 2 * std::exp(p * (std::log(curr) + std::log(prev)) / 2) * std::sinh(p * w) / (curr - prev); } /** * \ingroup MatrixFunctions_Module * * \brief Class for computing matrix powers. * * \tparam MatrixType type of the base, expected to be an instantiation * of the Matrix class template. * * This class is capable of computing real/complex matrices raised to * an arbitrary real power. Meanwhile, it saves the result of Schur * decomposition if an non-integral power has even been calculated. * Therefore, if you want to compute multiple (>= 2) matrix powers * for the same matrix, using the class directly is more efficient than * calling MatrixBase::pow(). * * Example: * \include MatrixPower_optimal.cpp * Output: \verbinclude MatrixPower_optimal.out */ template class MatrixPower { private: enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime }; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::Index Index; public: /** * \brief Constructor. * * \param[in] A the base of the matrix power. * * The class stores a reference to A, so it should not be changed * (or destroyed) before evaluation. */ explicit MatrixPower(const MatrixType& A) : m_A(A), m_conditionNumber(0) { eigen_assert(A.rows() == A.cols()); } /** * \brief Returns the matrix power. * * \param[in] p exponent, a real scalar. * \return The expression \f$ A^p \f$, where A is specified in the * constructor. */ const MatrixPowerRetval operator()(RealScalar p) { return MatrixPowerRetval(*this, p); } /** * \brief Compute the matrix power. * * \param[in] p exponent, a real scalar. * \param[out] res \f$ A^p \f$ where A is specified in the * constructor. 
*/ template void compute(ResultType& res, RealScalar p); Index rows() const { return m_A.rows(); } Index cols() const { return m_A.cols(); } private: typedef std::complex ComplexScalar; typedef Matrix ComplexMatrix; typename MatrixType::Nested m_A; MatrixType m_tmp; ComplexMatrix m_T, m_U, m_fT; RealScalar m_conditionNumber; RealScalar modfAndInit(RealScalar, RealScalar*); template void computeIntPower(ResultType&, RealScalar); template void computeFracPower(ResultType&, RealScalar); template static void revertSchur( Matrix& res, const ComplexMatrix& T, const ComplexMatrix& U); template static void revertSchur( Matrix& res, const ComplexMatrix& T, const ComplexMatrix& U); }; template template void MatrixPower::compute(ResultType& res, RealScalar p) { switch (cols()) { case 0: break; case 1: res(0,0) = std::pow(m_A.coeff(0,0), p); break; default: RealScalar intpart, x = modfAndInit(p, &intpart); computeIntPower(res, intpart); computeFracPower(res, x); } } template typename MatrixPower::RealScalar MatrixPower::modfAndInit(RealScalar x, RealScalar* intpart) { typedef Array RealArray; *intpart = std::floor(x); RealScalar res = x - *intpart; if (!m_conditionNumber && res) { const ComplexSchur schurOfA(m_A); m_T = schurOfA.matrixT(); m_U = schurOfA.matrixU(); const RealArray absTdiag = m_T.diagonal().array().abs(); m_conditionNumber = absTdiag.maxCoeff() / absTdiag.minCoeff(); } if (res>RealScalar(0.5) && res>(1-res)*std::pow(m_conditionNumber, res)) { --res; ++*intpart; } return res; } template template void MatrixPower::computeIntPower(ResultType& res, RealScalar p) { RealScalar pp = std::abs(p); if (p<0) m_tmp = m_A.inverse(); else m_tmp = m_A; res = MatrixType::Identity(rows(), cols()); while (pp >= 1) { if (std::fmod(pp, 2) >= 1) res = m_tmp * res; m_tmp *= m_tmp; pp /= 2; } } template template void MatrixPower::computeFracPower(ResultType& res, RealScalar p) { if (p) { eigen_assert(m_conditionNumber); MatrixPowerAtomic(m_T, p).compute(m_fT); revertSchur(m_tmp, m_fT, m_U); res = m_tmp * res; } } template template inline void MatrixPower::revertSchur( Matrix& res, const ComplexMatrix& T, const ComplexMatrix& U) { res.noalias() = U * (T.template triangularView() * U.adjoint()); } template template inline void MatrixPower::revertSchur( Matrix& res, const ComplexMatrix& T, const ComplexMatrix& U) { res.noalias() = (U * (T.template triangularView() * U.adjoint())).real(); } /** * \ingroup MatrixFunctions_Module * * \brief Proxy for the matrix power of some matrix (expression). * * \tparam Derived type of the base, a matrix (expression). * * This class holds the arguments to the matrix power until it is * assigned or evaluated for some other reason (so the argument * should not be changed in the meantime). It is the return type of * MatrixBase::pow() and related functions and most of the * time this is the only way it is used. */ template class MatrixPowerReturnValue : public ReturnByValue< MatrixPowerReturnValue > { public: typedef typename Derived::PlainObject PlainObject; typedef typename Derived::RealScalar RealScalar; typedef typename Derived::Index Index; /** * \brief Constructor. * * \param[in] A %Matrix (expression), the base of the matrix power. * \param[in] p scalar, the exponent of the matrix power. */ MatrixPowerReturnValue(const Derived& A, RealScalar p) : m_A(A), m_p(p) { } /** * \brief Compute the matrix power. * * \param[out] result \f$ A^p \f$ where \p A and \p p are as in the * constructor. 
*/ template inline void evalTo(ResultType& res) const { MatrixPower(m_A.eval()).compute(res, m_p); } Index rows() const { return m_A.rows(); } Index cols() const { return m_A.cols(); } private: const Derived& m_A; const RealScalar m_p; MatrixPowerReturnValue& operator=(const MatrixPowerReturnValue&); }; namespace internal { template struct traits< MatrixPowerRetval > { typedef typename MatrixPowerType::PlainObject ReturnType; }; template struct traits< MatrixPowerReturnValue > { typedef typename Derived::PlainObject ReturnType; }; } template const MatrixPowerReturnValue MatrixBase::pow(const RealScalar& p) const { return MatrixPowerReturnValue(derived(), p); } } // namespace Eigen #endif // EIGEN_MATRIX_POWER RcppEigen/inst/include/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h0000644000176200001440000005360412253717461027234 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009-2011 Jitse Niesen // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MATRIX_FUNCTION #define EIGEN_MATRIX_FUNCTION #include "StemFunction.h" #include "MatrixFunctionAtomic.h" namespace Eigen { /** \ingroup MatrixFunctions_Module * \brief Class for computing matrix functions. * \tparam MatrixType type of the argument of the matrix function, * expected to be an instantiation of the Matrix class template. * \tparam AtomicType type for computing matrix function of atomic blocks. * \tparam IsComplex used internally to select correct specialization. * * This class implements the Schur-Parlett algorithm for computing matrix functions. The spectrum of the * matrix is divided in clustered of eigenvalues that lies close together. This class delegates the * computation of the matrix function on every block corresponding to these clusters to an object of type * \p AtomicType and uses these results to compute the matrix function of the whole matrix. The class * \p AtomicType should have a \p compute() member function for computing the matrix function of a block. * * \sa class MatrixFunctionAtomic, class MatrixLogarithmAtomic */ template ::Scalar>::IsComplex> class MatrixFunction { public: /** \brief Constructor. * * \param[in] A argument of matrix function, should be a square matrix. * \param[in] atomic class for computing matrix function of atomic blocks. * * The class stores references to \p A and \p atomic, so they should not be * changed (or destroyed) before compute() is called. */ MatrixFunction(const MatrixType& A, AtomicType& atomic); /** \brief Compute the matrix function. * * \param[out] result the function \p f applied to \p A, as * specified in the constructor. * * See MatrixBase::matrixFunction() for details on how this computation * is implemented. 
*/ template void compute(ResultType &result); }; /** \internal \ingroup MatrixFunctions_Module * \brief Partial specialization of MatrixFunction for real matrices */ template class MatrixFunction { private: typedef internal::traits Traits; typedef typename Traits::Scalar Scalar; static const int Rows = Traits::RowsAtCompileTime; static const int Cols = Traits::ColsAtCompileTime; static const int Options = MatrixType::Options; static const int MaxRows = Traits::MaxRowsAtCompileTime; static const int MaxCols = Traits::MaxColsAtCompileTime; typedef std::complex ComplexScalar; typedef Matrix ComplexMatrix; public: /** \brief Constructor. * * \param[in] A argument of matrix function, should be a square matrix. * \param[in] atomic class for computing matrix function of atomic blocks. */ MatrixFunction(const MatrixType& A, AtomicType& atomic) : m_A(A), m_atomic(atomic) { } /** \brief Compute the matrix function. * * \param[out] result the function \p f applied to \p A, as * specified in the constructor. * * This function converts the real matrix \c A to a complex matrix, * uses MatrixFunction and then converts the result back to * a real matrix. */ template void compute(ResultType& result) { ComplexMatrix CA = m_A.template cast(); ComplexMatrix Cresult; MatrixFunction mf(CA, m_atomic); mf.compute(Cresult); result = Cresult.real(); } private: typename internal::nested::type m_A; /**< \brief Reference to argument of matrix function. */ AtomicType& m_atomic; /**< \brief Class for computing matrix function of atomic blocks. */ MatrixFunction& operator=(const MatrixFunction&); }; /** \internal \ingroup MatrixFunctions_Module * \brief Partial specialization of MatrixFunction for complex matrices */ template class MatrixFunction { private: typedef internal::traits Traits; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Index Index; static const int RowsAtCompileTime = Traits::RowsAtCompileTime; static const int ColsAtCompileTime = Traits::ColsAtCompileTime; static const int Options = MatrixType::Options; typedef typename NumTraits::Real RealScalar; typedef Matrix VectorType; typedef Matrix IntVectorType; typedef Matrix DynamicIntVectorType; typedef std::list Cluster; typedef std::list ListOfClusters; typedef Matrix DynMatrixType; public: MatrixFunction(const MatrixType& A, AtomicType& atomic); template void compute(ResultType& result); private: void computeSchurDecomposition(); void partitionEigenvalues(); typename ListOfClusters::iterator findCluster(Scalar key); void computeClusterSize(); void computeBlockStart(); void constructPermutation(); void permuteSchur(); void swapEntriesInSchur(Index index); void computeBlockAtomic(); Block block(MatrixType& A, Index i, Index j); void computeOffDiagonal(); DynMatrixType solveTriangularSylvester(const DynMatrixType& A, const DynMatrixType& B, const DynMatrixType& C); typename internal::nested::type m_A; /**< \brief Reference to argument of matrix function. */ AtomicType& m_atomic; /**< \brief Class for computing matrix function of atomic blocks. 
*/ MatrixType m_T; /**< \brief Triangular part of Schur decomposition */ MatrixType m_U; /**< \brief Unitary part of Schur decomposition */ MatrixType m_fT; /**< \brief %Matrix function applied to #m_T */ ListOfClusters m_clusters; /**< \brief Partition of eigenvalues into clusters of ei'vals "close" to each other */ DynamicIntVectorType m_eivalToCluster; /**< \brief m_eivalToCluster[i] = j means i-th ei'val is in j-th cluster */ DynamicIntVectorType m_clusterSize; /**< \brief Number of eigenvalues in each clusters */ DynamicIntVectorType m_blockStart; /**< \brief Row index at which block corresponding to i-th cluster starts */ IntVectorType m_permutation; /**< \brief Permutation which groups ei'vals in the same cluster together */ /** \brief Maximum distance allowed between eigenvalues to be considered "close". * * This is morally a \c static \c const \c Scalar, but only * integers can be static constant class members in C++. The * separation constant is set to 0.1, a value taken from the * paper by Davies and Higham. */ static const RealScalar separation() { return static_cast(0.1); } MatrixFunction& operator=(const MatrixFunction&); }; /** \brief Constructor. * * \param[in] A argument of matrix function, should be a square matrix. * \param[in] atomic class for computing matrix function of atomic blocks. */ template MatrixFunction::MatrixFunction(const MatrixType& A, AtomicType& atomic) : m_A(A), m_atomic(atomic) { /* empty body */ } /** \brief Compute the matrix function. * * \param[out] result the function \p f applied to \p A, as * specified in the constructor. */ template template void MatrixFunction::compute(ResultType& result) { computeSchurDecomposition(); partitionEigenvalues(); computeClusterSize(); computeBlockStart(); constructPermutation(); permuteSchur(); computeBlockAtomic(); computeOffDiagonal(); result = m_U * (m_fT.template triangularView() * m_U.adjoint()); } /** \brief Store the Schur decomposition of #m_A in #m_T and #m_U */ template void MatrixFunction::computeSchurDecomposition() { const ComplexSchur schurOfA(m_A); m_T = schurOfA.matrixT(); m_U = schurOfA.matrixU(); } /** \brief Partition eigenvalues in clusters of ei'vals close to each other * * This function computes #m_clusters. This is a partition of the * eigenvalues of #m_T in clusters, such that * # Any eigenvalue in a certain cluster is at most separation() away * from another eigenvalue in the same cluster. * # The distance between two eigenvalues in different clusters is * more than separation(). * The implementation follows Algorithm 4.1 in the paper of Davies * and Higham. */ template void MatrixFunction::partitionEigenvalues() { using std::abs; const Index rows = m_T.rows(); VectorType diag = m_T.diagonal(); // contains eigenvalues of A for (Index i=0; ibegin(), qi->end(), diag(j)) == qi->end()) { typename ListOfClusters::iterator qj = findCluster(diag(j)); if (qj == m_clusters.end()) { qi->push_back(diag(j)); } else { qi->insert(qi->end(), qj->begin(), qj->end()); m_clusters.erase(qj); } } } } } /** \brief Find cluster in #m_clusters containing some value * \param[in] key Value to find * \returns Iterator to cluster containing \c key, or * \c m_clusters.end() if no cluster in m_clusters contains \c key. 
*/ template typename MatrixFunction::ListOfClusters::iterator MatrixFunction::findCluster(Scalar key) { typename Cluster::iterator j; for (typename ListOfClusters::iterator i = m_clusters.begin(); i != m_clusters.end(); ++i) { j = std::find(i->begin(), i->end(), key); if (j != i->end()) return i; } return m_clusters.end(); } /** \brief Compute #m_clusterSize and #m_eivalToCluster using #m_clusters */ template void MatrixFunction::computeClusterSize() { const Index rows = m_T.rows(); VectorType diag = m_T.diagonal(); const Index numClusters = static_cast(m_clusters.size()); m_clusterSize.setZero(numClusters); m_eivalToCluster.resize(rows); Index clusterIndex = 0; for (typename ListOfClusters::const_iterator cluster = m_clusters.begin(); cluster != m_clusters.end(); ++cluster) { for (Index i = 0; i < diag.rows(); ++i) { if (std::find(cluster->begin(), cluster->end(), diag(i)) != cluster->end()) { ++m_clusterSize[clusterIndex]; m_eivalToCluster[i] = clusterIndex; } } ++clusterIndex; } } /** \brief Compute #m_blockStart using #m_clusterSize */ template void MatrixFunction::computeBlockStart() { m_blockStart.resize(m_clusterSize.rows()); m_blockStart(0) = 0; for (Index i = 1; i < m_clusterSize.rows(); i++) { m_blockStart(i) = m_blockStart(i-1) + m_clusterSize(i-1); } } /** \brief Compute #m_permutation using #m_eivalToCluster and #m_blockStart */ template void MatrixFunction::constructPermutation() { DynamicIntVectorType indexNextEntry = m_blockStart; m_permutation.resize(m_T.rows()); for (Index i = 0; i < m_T.rows(); i++) { Index cluster = m_eivalToCluster[i]; m_permutation[i] = indexNextEntry[cluster]; ++indexNextEntry[cluster]; } } /** \brief Permute Schur decomposition in #m_U and #m_T according to #m_permutation */ template void MatrixFunction::permuteSchur() { IntVectorType p = m_permutation; for (Index i = 0; i < p.rows() - 1; i++) { Index j; for (j = i; j < p.rows(); j++) { if (p(j) == i) break; } eigen_assert(p(j) == i); for (Index k = j-1; k >= i; k--) { swapEntriesInSchur(k); std::swap(p.coeffRef(k), p.coeffRef(k+1)); } } } /** \brief Swap rows \a index and \a index+1 in Schur decomposition in #m_U and #m_T */ template void MatrixFunction::swapEntriesInSchur(Index index) { JacobiRotation rotation; rotation.makeGivens(m_T(index, index+1), m_T(index+1, index+1) - m_T(index, index)); m_T.applyOnTheLeft(index, index+1, rotation.adjoint()); m_T.applyOnTheRight(index, index+1, rotation); m_U.applyOnTheRight(index, index+1, rotation); } /** \brief Compute block diagonal part of #m_fT. * * This routine computes the matrix function applied to the block diagonal part of #m_T, with the blocking * given by #m_blockStart. The matrix function of each diagonal block is computed by #m_atomic. The * off-diagonal parts of #m_fT are set to zero. */ template void MatrixFunction::computeBlockAtomic() { m_fT.resize(m_T.rows(), m_T.cols()); m_fT.setZero(); for (Index i = 0; i < m_clusterSize.rows(); ++i) { block(m_fT, i, i) = m_atomic.compute(block(m_T, i, i)); } } /** \brief Return block of matrix according to blocking given by #m_blockStart */ template Block MatrixFunction::block(MatrixType& A, Index i, Index j) { return A.block(m_blockStart(i), m_blockStart(j), m_clusterSize(i), m_clusterSize(j)); } /** \brief Compute part of #m_fT above block diagonal. * * This routine assumes that the block diagonal part of #m_fT (which * equals the matrix function applied to #m_T) has already been computed and computes * the part above the block diagonal. 
The part below the diagonal is * zero, because #m_T is upper triangular. */ template void MatrixFunction::computeOffDiagonal() { for (Index diagIndex = 1; diagIndex < m_clusterSize.rows(); diagIndex++) { for (Index blockIndex = 0; blockIndex < m_clusterSize.rows() - diagIndex; blockIndex++) { // compute (blockIndex, blockIndex+diagIndex) block DynMatrixType A = block(m_T, blockIndex, blockIndex); DynMatrixType B = -block(m_T, blockIndex+diagIndex, blockIndex+diagIndex); DynMatrixType C = block(m_fT, blockIndex, blockIndex) * block(m_T, blockIndex, blockIndex+diagIndex); C -= block(m_T, blockIndex, blockIndex+diagIndex) * block(m_fT, blockIndex+diagIndex, blockIndex+diagIndex); for (Index k = blockIndex + 1; k < blockIndex + diagIndex; k++) { C += block(m_fT, blockIndex, k) * block(m_T, k, blockIndex+diagIndex); C -= block(m_T, blockIndex, k) * block(m_fT, k, blockIndex+diagIndex); } block(m_fT, blockIndex, blockIndex+diagIndex) = solveTriangularSylvester(A, B, C); } } } /** \brief Solve a triangular Sylvester equation AX + XB = C * * \param[in] A the matrix A; should be square and upper triangular * \param[in] B the matrix B; should be square and upper triangular * \param[in] C the matrix C; should have correct size. * * \returns the solution X. * * If A is m-by-m and B is n-by-n, then both C and X are m-by-n. * The (i,j)-th component of the Sylvester equation is * \f[ * \sum_{k=i}^m A_{ik} X_{kj} + \sum_{k=1}^j X_{ik} B_{kj} = C_{ij}. * \f] * This can be re-arranged to yield: * \f[ * X_{ij} = \frac{1}{A_{ii} + B_{jj}} \Bigl( C_{ij} * - \sum_{k=i+1}^m A_{ik} X_{kj} - \sum_{k=1}^{j-1} X_{ik} B_{kj} \Bigr). * \f] * It is assumed that A and B are such that the numerator is never * zero (otherwise the Sylvester equation does not have a unique * solution). In that case, these equations can be evaluated in the * order \f$ i=m,\ldots,1 \f$ and \f$ j=1,\ldots,n \f$. */ template typename MatrixFunction::DynMatrixType MatrixFunction::solveTriangularSylvester( const DynMatrixType& A, const DynMatrixType& B, const DynMatrixType& C) { eigen_assert(A.rows() == A.cols()); eigen_assert(A.isUpperTriangular()); eigen_assert(B.rows() == B.cols()); eigen_assert(B.isUpperTriangular()); eigen_assert(C.rows() == A.rows()); eigen_assert(C.cols() == B.rows()); Index m = A.rows(); Index n = B.rows(); DynMatrixType X(m, n); for (Index i = m - 1; i >= 0; --i) { for (Index j = 0; j < n; ++j) { // Compute AX = \sum_{k=i+1}^m A_{ik} X_{kj} Scalar AX; if (i == m - 1) { AX = 0; } else { Matrix AXmatrix = A.row(i).tail(m-1-i) * X.col(j).tail(m-1-i); AX = AXmatrix(0,0); } // Compute XB = \sum_{k=1}^{j-1} X_{ik} B_{kj} Scalar XB; if (j == 0) { XB = 0; } else { Matrix XBmatrix = X.row(i).head(j) * B.col(j).head(j); XB = XBmatrix(0,0); } X(i,j) = (C(i,j) - AX - XB) / (A(i,i) + B(j,j)); } } return X; } /** \ingroup MatrixFunctions_Module * * \brief Proxy for the matrix function of some matrix (expression). * * \tparam Derived Type of the argument to the matrix function. * * This class holds the argument to the matrix function until it is * assigned or evaluated for some other reason (so the argument * should not be changed in the meantime). It is the return type of * matrixBase::matrixFunction() and related functions and most of the * time this is the only way it is used. */ template class MatrixFunctionReturnValue : public ReturnByValue > { public: typedef typename Derived::Scalar Scalar; typedef typename Derived::Index Index; typedef typename internal::stem_function::type StemFunction; /** \brief Constructor. 
* * \param[in] A %Matrix (expression) forming the argument of the * matrix function. * \param[in] f Stem function for matrix function under consideration. */ MatrixFunctionReturnValue(const Derived& A, StemFunction f) : m_A(A), m_f(f) { } /** \brief Compute the matrix function. * * \param[out] result \p f applied to \p A, where \p f and \p A * are as in the constructor. */ template inline void evalTo(ResultType& result) const { typedef typename Derived::PlainObject PlainObject; typedef internal::traits Traits; static const int RowsAtCompileTime = Traits::RowsAtCompileTime; static const int ColsAtCompileTime = Traits::ColsAtCompileTime; static const int Options = PlainObject::Options; typedef std::complex::Real> ComplexScalar; typedef Matrix DynMatrixType; typedef MatrixFunctionAtomic AtomicType; AtomicType atomic(m_f); const PlainObject Aevaluated = m_A.eval(); MatrixFunction mf(Aevaluated, atomic); mf.compute(result); } Index rows() const { return m_A.rows(); } Index cols() const { return m_A.cols(); } private: typename internal::nested::type m_A; StemFunction *m_f; MatrixFunctionReturnValue& operator=(const MatrixFunctionReturnValue&); }; namespace internal { template struct traits > { typedef typename Derived::PlainObject ReturnType; }; } /********** MatrixBase methods **********/ template const MatrixFunctionReturnValue MatrixBase::matrixFunction(typename internal::stem_function::Scalar>::type f) const { eigen_assert(rows() == cols()); return MatrixFunctionReturnValue(derived(), f); } template const MatrixFunctionReturnValue MatrixBase::sin() const { eigen_assert(rows() == cols()); typedef typename internal::stem_function::ComplexScalar ComplexScalar; return MatrixFunctionReturnValue(derived(), StdStemFunctions::sin); } template const MatrixFunctionReturnValue MatrixBase::cos() const { eigen_assert(rows() == cols()); typedef typename internal::stem_function::ComplexScalar ComplexScalar; return MatrixFunctionReturnValue(derived(), StdStemFunctions::cos); } template const MatrixFunctionReturnValue MatrixBase::sinh() const { eigen_assert(rows() == cols()); typedef typename internal::stem_function::ComplexScalar ComplexScalar; return MatrixFunctionReturnValue(derived(), StdStemFunctions::sinh); } template const MatrixFunctionReturnValue MatrixBase::cosh() const { eigen_assert(rows() == cols()); typedef typename internal::stem_function::ComplexScalar ComplexScalar; return MatrixFunctionReturnValue(derived(), StdStemFunctions::cosh); } } // end namespace Eigen #endif // EIGEN_MATRIX_FUNCTION RcppEigen/inst/include/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h0000644000176200001440000001043212253717461030361 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Jitse Niesen // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MATRIX_FUNCTION_ATOMIC #define EIGEN_MATRIX_FUNCTION_ATOMIC namespace Eigen { /** \ingroup MatrixFunctions_Module * \class MatrixFunctionAtomic * \brief Helper class for computing matrix functions of atomic matrices. * * \internal * Here, an atomic matrix is a triangular matrix whose diagonal * entries are close to each other. 
*/ template class MatrixFunctionAtomic { public: typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Index Index; typedef typename NumTraits::Real RealScalar; typedef typename internal::stem_function::type StemFunction; typedef Matrix VectorType; /** \brief Constructor * \param[in] f matrix function to compute. */ MatrixFunctionAtomic(StemFunction f) : m_f(f) { } /** \brief Compute matrix function of atomic matrix * \param[in] A argument of matrix function, should be upper triangular and atomic * \returns f(A), the matrix function evaluated at the given matrix */ MatrixType compute(const MatrixType& A); private: // Prevent copying MatrixFunctionAtomic(const MatrixFunctionAtomic&); MatrixFunctionAtomic& operator=(const MatrixFunctionAtomic&); void computeMu(); bool taylorConverged(Index s, const MatrixType& F, const MatrixType& Fincr, const MatrixType& P); /** \brief Pointer to scalar function */ StemFunction* m_f; /** \brief Size of matrix function */ Index m_Arows; /** \brief Mean of eigenvalues */ Scalar m_avgEival; /** \brief Argument shifted by mean of eigenvalues */ MatrixType m_Ashifted; /** \brief Constant used to determine whether Taylor series has converged */ RealScalar m_mu; }; template MatrixType MatrixFunctionAtomic::compute(const MatrixType& A) { // TODO: Use that A is upper triangular m_Arows = A.rows(); m_avgEival = A.trace() / Scalar(RealScalar(m_Arows)); m_Ashifted = A - m_avgEival * MatrixType::Identity(m_Arows, m_Arows); computeMu(); MatrixType F = m_f(m_avgEival, 0) * MatrixType::Identity(m_Arows, m_Arows); MatrixType P = m_Ashifted; MatrixType Fincr; for (Index s = 1; s < 1.1 * m_Arows + 10; s++) { // upper limit is fairly arbitrary Fincr = m_f(m_avgEival, static_cast(s)) * P; F += Fincr; P = Scalar(RealScalar(1.0/(s + 1))) * P * m_Ashifted; if (taylorConverged(s, F, Fincr, P)) { return F; } } eigen_assert("Taylor series does not converge" && 0); return F; } /** \brief Compute \c m_mu. */ template void MatrixFunctionAtomic::computeMu() { const MatrixType N = MatrixType::Identity(m_Arows, m_Arows) - m_Ashifted; VectorType e = VectorType::Ones(m_Arows); N.template triangularView().solveInPlace(e); m_mu = e.cwiseAbs().maxCoeff(); } /** \brief Determine whether Taylor series has converged */ template bool MatrixFunctionAtomic::taylorConverged(Index s, const MatrixType& F, const MatrixType& Fincr, const MatrixType& P) { const Index n = F.rows(); const RealScalar F_norm = F.cwiseAbs().rowwise().sum().maxCoeff(); const RealScalar Fincr_norm = Fincr.cwiseAbs().rowwise().sum().maxCoeff(); if (Fincr_norm < NumTraits::epsilon() * F_norm) { RealScalar delta = 0; RealScalar rfactorial = 1; for (Index r = 0; r < n; r++) { RealScalar mx = 0; for (Index i = 0; i < n; i++) mx = (std::max)(mx, std::abs(m_f(m_Ashifted(i, i) + m_avgEival, static_cast(s+r)))); if (r != 0) rfactorial *= RealScalar(r); delta = (std::max)(delta, mx / rfactorial); } const RealScalar P_norm = P.cwiseAbs().rowwise().sum().maxCoeff(); if (m_mu * delta * P_norm < NumTraits::epsilon() * F_norm) return true; } return false; } } // end namespace Eigen #endif // EIGEN_MATRIX_FUNCTION_ATOMIC RcppEigen/inst/include/unsupported/Eigen/src/AutoDiff/0000755000176200001440000000000012647570720022447 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h0000644000176200001440000006272612647570720025464 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. 
// // Copyright (C) 2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_AUTODIFF_SCALAR_H #define EIGEN_AUTODIFF_SCALAR_H namespace Eigen { namespace internal { template struct make_coherent_impl { static void run(A&, B&) {} }; // resize a to match b is a.size()==0, and conversely. template void make_coherent(const A& a, const B&b) { make_coherent_impl::run(a.const_cast_derived(), b.const_cast_derived()); } template struct auto_diff_special_op; } // end namespace internal /** \class AutoDiffScalar * \brief A scalar type replacement with automatic differentation capability * * \param _DerType the vector type used to store/represent the derivatives. The base scalar type * as well as the number of derivatives to compute are determined from this type. * Typical choices include, e.g., \c Vector4f for 4 derivatives, or \c VectorXf * if the number of derivatives is not known at compile time, and/or, the number * of derivatives is large. * Note that _DerType can also be a reference (e.g., \c VectorXf&) to wrap a * existing vector into an AutoDiffScalar. * Finally, _DerType can also be any Eigen compatible expression. * * This class represents a scalar value while tracking its respective derivatives using Eigen's expression * template mechanism. * * It supports the following list of global math function: * - std::abs, std::sqrt, std::pow, std::exp, std::log, std::sin, std::cos, * - internal::abs, internal::sqrt, numext::pow, internal::exp, internal::log, internal::sin, internal::cos, * - internal::conj, internal::real, internal::imag, numext::abs2. * * AutoDiffScalar can be used as the scalar type of an Eigen::Matrix object. However, * in that case, the expression template mechanism only occurs at the top Matrix level, * while derivatives are computed right away. * */ template class AutoDiffScalar : public internal::auto_diff_special_op <_DerType, !internal::is_same::type>::Scalar, typename NumTraits::type>::Scalar>::Real>::value> { public: typedef internal::auto_diff_special_op <_DerType, !internal::is_same::type>::Scalar, typename NumTraits::type>::Scalar>::Real>::value> Base; typedef typename internal::remove_all<_DerType>::type DerType; typedef typename internal::traits::Scalar Scalar; typedef typename NumTraits::Real Real; using Base::operator+; using Base::operator*; /** Default constructor without any initialization. */ AutoDiffScalar() {} /** Constructs an active scalar from its \a value, and initializes the \a nbDer derivatives such that it corresponds to the \a derNumber -th variable */ AutoDiffScalar(const Scalar& value, int nbDer, int derNumber) : m_value(value), m_derivatives(DerType::Zero(nbDer)) { m_derivatives.coeffRef(derNumber) = Scalar(1); } /** Conversion from a scalar constant to an active scalar. * The derivatives are set to zero. 
*/ /*explicit*/ AutoDiffScalar(const Real& value) : m_value(value) { if(m_derivatives.size()>0) m_derivatives.setZero(); } /** Constructs an active scalar from its \a value and derivatives \a der */ AutoDiffScalar(const Scalar& value, const DerType& der) : m_value(value), m_derivatives(der) {} template AutoDiffScalar(const AutoDiffScalar& other) : m_value(other.value()), m_derivatives(other.derivatives()) {} friend std::ostream & operator << (std::ostream & s, const AutoDiffScalar& a) { return s << a.value(); } AutoDiffScalar(const AutoDiffScalar& other) : m_value(other.value()), m_derivatives(other.derivatives()) {} template inline AutoDiffScalar& operator=(const AutoDiffScalar& other) { m_value = other.value(); m_derivatives = other.derivatives(); return *this; } inline AutoDiffScalar& operator=(const AutoDiffScalar& other) { m_value = other.value(); m_derivatives = other.derivatives(); return *this; } // inline operator const Scalar& () const { return m_value; } // inline operator Scalar& () { return m_value; } inline const Scalar& value() const { return m_value; } inline Scalar& value() { return m_value; } inline const DerType& derivatives() const { return m_derivatives; } inline DerType& derivatives() { return m_derivatives; } inline bool operator< (const Scalar& other) const { return m_value < other; } inline bool operator<=(const Scalar& other) const { return m_value <= other; } inline bool operator> (const Scalar& other) const { return m_value > other; } inline bool operator>=(const Scalar& other) const { return m_value >= other; } inline bool operator==(const Scalar& other) const { return m_value == other; } inline bool operator!=(const Scalar& other) const { return m_value != other; } friend inline bool operator< (const Scalar& a, const AutoDiffScalar& b) { return a < b.value(); } friend inline bool operator<=(const Scalar& a, const AutoDiffScalar& b) { return a <= b.value(); } friend inline bool operator> (const Scalar& a, const AutoDiffScalar& b) { return a > b.value(); } friend inline bool operator>=(const Scalar& a, const AutoDiffScalar& b) { return a >= b.value(); } friend inline bool operator==(const Scalar& a, const AutoDiffScalar& b) { return a == b.value(); } friend inline bool operator!=(const Scalar& a, const AutoDiffScalar& b) { return a != b.value(); } template inline bool operator< (const AutoDiffScalar& b) const { return m_value < b.value(); } template inline bool operator<=(const AutoDiffScalar& b) const { return m_value <= b.value(); } template inline bool operator> (const AutoDiffScalar& b) const { return m_value > b.value(); } template inline bool operator>=(const AutoDiffScalar& b) const { return m_value >= b.value(); } template inline bool operator==(const AutoDiffScalar& b) const { return m_value == b.value(); } template inline bool operator!=(const AutoDiffScalar& b) const { return m_value != b.value(); } inline const AutoDiffScalar operator+(const Scalar& other) const { return AutoDiffScalar(m_value + other, m_derivatives); } friend inline const AutoDiffScalar operator+(const Scalar& a, const AutoDiffScalar& b) { return AutoDiffScalar(a + b.value(), b.derivatives()); } // inline const AutoDiffScalar operator+(const Real& other) const // { // return AutoDiffScalar(m_value + other, m_derivatives); // } // friend inline const AutoDiffScalar operator+(const Real& a, const AutoDiffScalar& b) // { // return AutoDiffScalar(a + b.value(), b.derivatives()); // } inline AutoDiffScalar& operator+=(const Scalar& other) { value() += other; return *this; } template 
inline const AutoDiffScalar,const DerType,const typename internal::remove_all::type> > operator+(const AutoDiffScalar& other) const { internal::make_coherent(m_derivatives, other.derivatives()); return AutoDiffScalar,const DerType,const typename internal::remove_all::type> >( m_value + other.value(), m_derivatives + other.derivatives()); } template inline AutoDiffScalar& operator+=(const AutoDiffScalar& other) { (*this) = (*this) + other; return *this; } inline const AutoDiffScalar operator-(const Scalar& b) const { return AutoDiffScalar(m_value - b, m_derivatives); } friend inline const AutoDiffScalar, const DerType> > operator-(const Scalar& a, const AutoDiffScalar& b) { return AutoDiffScalar, const DerType> > (a - b.value(), -b.derivatives()); } inline AutoDiffScalar& operator-=(const Scalar& other) { value() -= other; return *this; } template inline const AutoDiffScalar, const DerType,const typename internal::remove_all::type> > operator-(const AutoDiffScalar& other) const { internal::make_coherent(m_derivatives, other.derivatives()); return AutoDiffScalar, const DerType,const typename internal::remove_all::type> >( m_value - other.value(), m_derivatives - other.derivatives()); } template inline AutoDiffScalar& operator-=(const AutoDiffScalar& other) { *this = *this - other; return *this; } inline const AutoDiffScalar, const DerType> > operator-() const { return AutoDiffScalar, const DerType> >( -m_value, -m_derivatives); } inline const AutoDiffScalar, const DerType> > operator*(const Scalar& other) const { return AutoDiffScalar, const DerType> >( m_value * other, (m_derivatives * other)); } friend inline const AutoDiffScalar, const DerType> > operator*(const Scalar& other, const AutoDiffScalar& a) { return AutoDiffScalar, const DerType> >( a.value() * other, a.derivatives() * other); } // inline const AutoDiffScalar, DerType>::Type > // operator*(const Real& other) const // { // return AutoDiffScalar, DerType>::Type >( // m_value * other, // (m_derivatives * other)); // } // // friend inline const AutoDiffScalar, DerType>::Type > // operator*(const Real& other, const AutoDiffScalar& a) // { // return AutoDiffScalar, DerType>::Type >( // a.value() * other, // a.derivatives() * other); // } inline const AutoDiffScalar, const DerType> > operator/(const Scalar& other) const { return AutoDiffScalar, const DerType> >( m_value / other, (m_derivatives * (Scalar(1)/other))); } friend inline const AutoDiffScalar, const DerType> > operator/(const Scalar& other, const AutoDiffScalar& a) { return AutoDiffScalar, const DerType> >( other / a.value(), a.derivatives() * (Scalar(-other) / (a.value()*a.value()))); } // inline const AutoDiffScalar, DerType>::Type > // operator/(const Real& other) const // { // return AutoDiffScalar, DerType>::Type >( // m_value / other, // (m_derivatives * (Real(1)/other))); // } // // friend inline const AutoDiffScalar, DerType>::Type > // operator/(const Real& other, const AutoDiffScalar& a) // { // return AutoDiffScalar, DerType>::Type >( // other / a.value(), // a.derivatives() * (-Real(1)/other)); // } template inline const AutoDiffScalar, const CwiseBinaryOp, const CwiseUnaryOp, const DerType>, const CwiseUnaryOp, const typename internal::remove_all::type > > > > operator/(const AutoDiffScalar& other) const { internal::make_coherent(m_derivatives, other.derivatives()); return AutoDiffScalar, const CwiseBinaryOp, const CwiseUnaryOp, const DerType>, const CwiseUnaryOp, const typename internal::remove_all::type > > > >( m_value / other.value(), ((m_derivatives * 
other.value()) - (m_value * other.derivatives())) * (Scalar(1)/(other.value()*other.value()))); } template inline const AutoDiffScalar, const CwiseUnaryOp, const DerType>, const CwiseUnaryOp, const typename internal::remove_all::type> > > operator*(const AutoDiffScalar& other) const { internal::make_coherent(m_derivatives, other.derivatives()); return AutoDiffScalar, const CwiseUnaryOp, const DerType>, const CwiseUnaryOp, const typename internal::remove_all::type > > >( m_value * other.value(), (m_derivatives * other.value()) + (m_value * other.derivatives())); } inline AutoDiffScalar& operator*=(const Scalar& other) { *this = *this * other; return *this; } template inline AutoDiffScalar& operator*=(const AutoDiffScalar& other) { *this = *this * other; return *this; } inline AutoDiffScalar& operator/=(const Scalar& other) { *this = *this / other; return *this; } template inline AutoDiffScalar& operator/=(const AutoDiffScalar& other) { *this = *this / other; return *this; } protected: Scalar m_value; DerType m_derivatives; }; namespace internal { template struct auto_diff_special_op<_DerType, true> // : auto_diff_scalar_op<_DerType, typename NumTraits::Real, // is_same::Real>::value> { typedef typename remove_all<_DerType>::type DerType; typedef typename traits::Scalar Scalar; typedef typename NumTraits::Real Real; // typedef auto_diff_scalar_op<_DerType, typename NumTraits::Real, // is_same::Real>::value> Base; // using Base::operator+; // using Base::operator+=; // using Base::operator-; // using Base::operator-=; // using Base::operator*; // using Base::operator*=; const AutoDiffScalar<_DerType>& derived() const { return *static_cast*>(this); } AutoDiffScalar<_DerType>& derived() { return *static_cast*>(this); } inline const AutoDiffScalar operator+(const Real& other) const { return AutoDiffScalar(derived().value() + other, derived().derivatives()); } friend inline const AutoDiffScalar operator+(const Real& a, const AutoDiffScalar<_DerType>& b) { return AutoDiffScalar(a + b.value(), b.derivatives()); } inline AutoDiffScalar<_DerType>& operator+=(const Real& other) { derived().value() += other; return derived(); } inline const AutoDiffScalar, DerType>::Type > operator*(const Real& other) const { return AutoDiffScalar, DerType>::Type >( derived().value() * other, derived().derivatives() * other); } friend inline const AutoDiffScalar, DerType>::Type > operator*(const Real& other, const AutoDiffScalar<_DerType>& a) { return AutoDiffScalar, DerType>::Type >( a.value() * other, a.derivatives() * other); } inline AutoDiffScalar<_DerType>& operator*=(const Scalar& other) { *this = *this * other; return derived(); } }; template struct auto_diff_special_op<_DerType, false> { void operator*() const; void operator-() const; void operator+() const; }; template struct make_coherent_impl, B> { typedef Matrix A; static void run(A& a, B& b) { if((A_Rows==Dynamic || A_Cols==Dynamic) && (a.size()==0)) { a.resize(b.size()); a.setZero(); } } }; template struct make_coherent_impl > { typedef Matrix B; static void run(A& a, B& b) { if((B_Rows==Dynamic || B_Cols==Dynamic) && (b.size()==0)) { b.resize(a.size()); b.setZero(); } } }; template struct make_coherent_impl, Matrix > { typedef Matrix A; typedef Matrix B; static void run(A& a, B& b) { if((A_Rows==Dynamic || A_Cols==Dynamic) && (a.size()==0)) { a.resize(b.size()); a.setZero(); } else if((B_Rows==Dynamic || B_Cols==Dynamic) && (b.size()==0)) { b.resize(a.size()); b.setZero(); } } }; template struct scalar_product_traits,A_Scalar> { enum { Defined = 1 }; 
typedef Matrix ReturnType; }; template struct scalar_product_traits > { enum { Defined = 1 }; typedef Matrix ReturnType; }; template struct scalar_product_traits,typename DerType::Scalar> { enum { Defined = 1 }; typedef AutoDiffScalar ReturnType; }; template struct scalar_product_traits > { enum { Defined = 1 }; typedef AutoDiffScalar ReturnType; }; } // end namespace internal #define EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(FUNC,CODE) \ template \ inline const Eigen::AutoDiffScalar::type>::Scalar>, const typename Eigen::internal::remove_all::type> > \ FUNC(const Eigen::AutoDiffScalar& x) { \ using namespace Eigen; \ typedef typename Eigen::internal::traits::type>::Scalar Scalar; \ typedef AutoDiffScalar, const typename Eigen::internal::remove_all::type> > ReturnType; \ CODE; \ } template inline const AutoDiffScalar& conj(const AutoDiffScalar& x) { return x; } template inline const AutoDiffScalar& real(const AutoDiffScalar& x) { return x; } template inline typename DerType::Scalar imag(const AutoDiffScalar&) { return 0.; } template inline AutoDiffScalar (min)(const AutoDiffScalar& x, const T& y) { return (x <= y ? x : y); } template inline AutoDiffScalar (max)(const AutoDiffScalar& x, const T& y) { return (x >= y ? x : y); } template inline AutoDiffScalar (min)(const T& x, const AutoDiffScalar& y) { return (x < y ? x : y); } template inline AutoDiffScalar (max)(const T& x, const AutoDiffScalar& y) { return (x > y ? x : y); } EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(abs, using std::abs; return ReturnType(abs(x.value()), x.derivatives() * (x.value()<0 ? -1 : 1) );) EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(abs2, using numext::abs2; return ReturnType(abs2(x.value()), x.derivatives() * (Scalar(2)*x.value()));) EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(sqrt, using std::sqrt; Scalar sqrtx = sqrt(x.value()); return ReturnType(sqrtx,x.derivatives() * (Scalar(0.5) / sqrtx));) EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(cos, using std::cos; using std::sin; return ReturnType(cos(x.value()), x.derivatives() * (-sin(x.value())));) EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(sin, using std::sin; using std::cos; return ReturnType(sin(x.value()),x.derivatives() * cos(x.value()));) EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(exp, using std::exp; Scalar expx = exp(x.value()); return ReturnType(expx,x.derivatives() * expx);) EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(log, using std::log; return ReturnType(log(x.value()),x.derivatives() * (Scalar(1)/x.value()));) template inline const Eigen::AutoDiffScalar::Scalar>, const DerType> > pow(const Eigen::AutoDiffScalar& x, typename Eigen::internal::traits::Scalar y) { using namespace Eigen; typedef typename Eigen::internal::traits::Scalar Scalar; return AutoDiffScalar, const DerType> >( std::pow(x.value(),y), x.derivatives() * (y * std::pow(x.value(),y-1))); } template inline const AutoDiffScalar::Scalar,Dynamic,1> > atan2(const AutoDiffScalar& a, const AutoDiffScalar& b) { using std::atan2; using std::max; typedef typename internal::traits::Scalar Scalar; typedef AutoDiffScalar > PlainADS; PlainADS ret; ret.value() = atan2(a.value(), b.value()); Scalar tmp2 = a.value() * a.value(); Scalar tmp3 = b.value() * b.value(); Scalar tmp4 = tmp3/(tmp2+tmp3); if (tmp4!=0) ret.derivatives() = (a.derivatives() * b.value() - a.value() * b.derivatives()) * (tmp2+tmp3); return ret; } EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(tan, using std::tan; using std::cos; return ReturnType(tan(x.value()),x.derivatives() * (Scalar(1)/numext::abs2(cos(x.value()))));) EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(asin, using std::sqrt; using std::asin; return 
ReturnType(asin(x.value()),x.derivatives() * (Scalar(1)/sqrt(1-numext::abs2(x.value()))));) EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(acos, using std::sqrt; using std::acos; return ReturnType(acos(x.value()),x.derivatives() * (Scalar(-1)/sqrt(1-numext::abs2(x.value()))));) #undef EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY template struct NumTraits > : NumTraits< typename NumTraits::Real > { typedef AutoDiffScalar::Real,DerType::RowsAtCompileTime,DerType::ColsAtCompileTime> > Real; typedef AutoDiffScalar NonInteger; typedef AutoDiffScalar Nested; enum{ RequireInitialization = 1 }; }; } #endif // EIGEN_AUTODIFF_SCALAR_H RcppEigen/inst/include/unsupported/Eigen/src/AutoDiff/AutoDiffJacobian.h0000644000176200001440000000445412253717461025755 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_AUTODIFF_JACOBIAN_H #define EIGEN_AUTODIFF_JACOBIAN_H namespace Eigen { template class AutoDiffJacobian : public Functor { public: AutoDiffJacobian() : Functor() {} AutoDiffJacobian(const Functor& f) : Functor(f) {} // forward constructors template AutoDiffJacobian(const T0& a0) : Functor(a0) {} template AutoDiffJacobian(const T0& a0, const T1& a1) : Functor(a0, a1) {} template AutoDiffJacobian(const T0& a0, const T1& a1, const T2& a2) : Functor(a0, a1, a2) {} enum { InputsAtCompileTime = Functor::InputsAtCompileTime, ValuesAtCompileTime = Functor::ValuesAtCompileTime }; typedef typename Functor::InputType InputType; typedef typename Functor::ValueType ValueType; typedef typename Functor::JacobianType JacobianType; typedef typename JacobianType::Scalar Scalar; typedef typename JacobianType::Index Index; typedef Matrix DerivativeType; typedef AutoDiffScalar ActiveScalar; typedef Matrix ActiveInput; typedef Matrix ActiveValue; void operator() (const InputType& x, ValueType* v, JacobianType* _jac=0) const { eigen_assert(v!=0); if (!_jac) { Functor::operator()(x, v); return; } JacobianType& jac = *_jac; ActiveInput ax = x.template cast(); ActiveValue av(jac.rows()); if(InputsAtCompileTime==Dynamic) for (Index j=0; jinputs()); for (Index i=0; iinputs(),i); Functor::operator()(ax, &av); for (Index i=0; i // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_AUTODIFF_VECTOR_H #define EIGEN_AUTODIFF_VECTOR_H namespace Eigen { /* \class AutoDiffScalar * \brief A scalar type replacement with automatic differentation capability * * \param DerType the vector type used to store/represent the derivatives (e.g. Vector3f) * * This class represents a scalar value while tracking its respective derivatives. * * It supports the following list of global math function: * - std::abs, std::sqrt, std::pow, std::exp, std::log, std::sin, std::cos, * - internal::abs, internal::sqrt, numext::pow, internal::exp, internal::log, internal::sin, internal::cos, * - internal::conj, internal::real, internal::imag, numext::abs2. * * AutoDiffScalar can be used as the scalar type of an Eigen::Matrix object. However, * in that case, the expression template mechanism only occurs at the top Matrix level, * while derivatives are computed right away. 
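  *
  * A minimal usage sketch (the type alias and the values below are illustrative):
  * \code
  * typedef Eigen::AutoDiffVector<Eigen::Vector2d, Eigen::Matrix2d> ADVec;
  * ADVec v(Eigen::Vector2d(1.0, 2.0));  // the jacobian starts out as zero
  * v.jacobian().setIdentity();          // treat v as its own set of independent variables
  * v[1].value();                        // 2.0
  * v[1].derivatives();                  // column 1 of the jacobian, here (0, 1)
  * v.sum().value();                     // 3.0, with derivatives (1, 1)
  * \endcode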
* */ template class AutoDiffVector { public: //typedef typename internal::traits::Scalar Scalar; typedef typename internal::traits::Scalar BaseScalar; typedef AutoDiffScalar > ActiveScalar; typedef ActiveScalar Scalar; typedef AutoDiffScalar CoeffType; typedef typename JacobianType::Index Index; inline AutoDiffVector() {} inline AutoDiffVector(const ValueType& values) : m_values(values) { m_jacobian.setZero(); } CoeffType operator[] (Index i) { return CoeffType(m_values[i], m_jacobian.col(i)); } const CoeffType operator[] (Index i) const { return CoeffType(m_values[i], m_jacobian.col(i)); } CoeffType operator() (Index i) { return CoeffType(m_values[i], m_jacobian.col(i)); } const CoeffType operator() (Index i) const { return CoeffType(m_values[i], m_jacobian.col(i)); } CoeffType coeffRef(Index i) { return CoeffType(m_values[i], m_jacobian.col(i)); } const CoeffType coeffRef(Index i) const { return CoeffType(m_values[i], m_jacobian.col(i)); } Index size() const { return m_values.size(); } // FIXME here we could return an expression of the sum Scalar sum() const { /*std::cerr << "sum \n\n";*/ /*std::cerr << m_jacobian.rowwise().sum() << "\n\n";*/ return Scalar(m_values.sum(), m_jacobian.rowwise().sum()); } inline AutoDiffVector(const ValueType& values, const JacobianType& jac) : m_values(values), m_jacobian(jac) {} template inline AutoDiffVector(const AutoDiffVector& other) : m_values(other.values()), m_jacobian(other.jacobian()) {} inline AutoDiffVector(const AutoDiffVector& other) : m_values(other.values()), m_jacobian(other.jacobian()) {} template inline AutoDiffVector& operator=(const AutoDiffVector& other) { m_values = other.values(); m_jacobian = other.jacobian(); return *this; } inline AutoDiffVector& operator=(const AutoDiffVector& other) { m_values = other.values(); m_jacobian = other.jacobian(); return *this; } inline const ValueType& values() const { return m_values; } inline ValueType& values() { return m_values; } inline const JacobianType& jacobian() const { return m_jacobian; } inline JacobianType& jacobian() { return m_jacobian; } template inline const AutoDiffVector< typename MakeCwiseBinaryOp,ValueType,OtherValueType>::Type, typename MakeCwiseBinaryOp,JacobianType,OtherJacobianType>::Type > operator+(const AutoDiffVector& other) const { return AutoDiffVector< typename MakeCwiseBinaryOp,ValueType,OtherValueType>::Type, typename MakeCwiseBinaryOp,JacobianType,OtherJacobianType>::Type >( m_values + other.values(), m_jacobian + other.jacobian()); } template inline AutoDiffVector& operator+=(const AutoDiffVector& other) { m_values += other.values(); m_jacobian += other.jacobian(); return *this; } template inline const AutoDiffVector< typename MakeCwiseBinaryOp,ValueType,OtherValueType>::Type, typename MakeCwiseBinaryOp,JacobianType,OtherJacobianType>::Type > operator-(const AutoDiffVector& other) const { return AutoDiffVector< typename MakeCwiseBinaryOp,ValueType,OtherValueType>::Type, typename MakeCwiseBinaryOp,JacobianType,OtherJacobianType>::Type >( m_values - other.values(), m_jacobian - other.jacobian()); } template inline AutoDiffVector& operator-=(const AutoDiffVector& other) { m_values -= other.values(); m_jacobian -= other.jacobian(); return *this; } inline const AutoDiffVector< typename MakeCwiseUnaryOp, ValueType>::Type, typename MakeCwiseUnaryOp, JacobianType>::Type > operator-() const { return AutoDiffVector< typename MakeCwiseUnaryOp, ValueType>::Type, typename MakeCwiseUnaryOp, JacobianType>::Type >( -m_values, -m_jacobian); } inline const AutoDiffVector< typename 
MakeCwiseUnaryOp, ValueType>::Type, typename MakeCwiseUnaryOp, JacobianType>::Type> operator*(const BaseScalar& other) const { return AutoDiffVector< typename MakeCwiseUnaryOp, ValueType>::Type, typename MakeCwiseUnaryOp, JacobianType>::Type >( m_values * other, m_jacobian * other); } friend inline const AutoDiffVector< typename MakeCwiseUnaryOp, ValueType>::Type, typename MakeCwiseUnaryOp, JacobianType>::Type > operator*(const Scalar& other, const AutoDiffVector& v) { return AutoDiffVector< typename MakeCwiseUnaryOp, ValueType>::Type, typename MakeCwiseUnaryOp, JacobianType>::Type >( v.values() * other, v.jacobian() * other); } // template // inline const AutoDiffVector< // CwiseBinaryOp, ValueType, OtherValueType> // CwiseBinaryOp, // CwiseUnaryOp, JacobianType>, // CwiseUnaryOp, OtherJacobianType> > > // operator*(const AutoDiffVector& other) const // { // return AutoDiffVector< // CwiseBinaryOp, ValueType, OtherValueType> // CwiseBinaryOp, // CwiseUnaryOp, JacobianType>, // CwiseUnaryOp, OtherJacobianType> > >( // m_values.cwise() * other.values(), // (m_jacobian * other.values()) + (m_values * other.jacobian())); // } inline AutoDiffVector& operator*=(const Scalar& other) { m_values *= other; m_jacobian *= other; return *this; } template inline AutoDiffVector& operator*=(const AutoDiffVector& other) { *this = *this * other; return *this; } protected: ValueType m_values; JacobianType m_jacobian; }; } #endif // EIGEN_AUTODIFF_VECTOR_H RcppEigen/inst/include/unsupported/Eigen/src/NonLinearOptimization/0000755000176200001440000000000012253717461025240 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h0000644000176200001440000000252212253717461026737 0ustar liggesusersnamespace Eigen { namespace internal { template void rwupdt( Matrix< Scalar, Dynamic, Dynamic > &r, const Matrix< Scalar, Dynamic, 1> &w, Matrix< Scalar, Dynamic, 1> &b, Scalar alpha) { typedef DenseIndex Index; const Index n = r.cols(); eigen_assert(r.rows()>=n); std::vector > givens(n); /* Local variables */ Scalar temp, rowj; /* Function Body */ for (Index j = 0; j < n; ++j) { rowj = w[j]; /* apply the previous transformations to */ /* r(i,j), i=0,1,...,j-1, and to w(j). */ for (Index i = 0; i < j; ++i) { temp = givens[i].c() * r(i,j) + givens[i].s() * rowj; rowj = -givens[i].s() * r(i,j) + givens[i].c() * rowj; r(i,j) = temp; } /* determine a givens rotation which eliminates w(j). */ givens[j].makeGivens(-r(j,j), rowj); if (rowj == 0.) continue; // givens[j] is identity /* apply the current transformation to r(j,j), b(j), and alpha. */ r(j,j) = givens[j].c() * r(j,j) + givens[j].s() * rowj; temp = givens[j].c() * b[j] + givens[j].s() * alpha; alpha = -givens[j].s() * b[j] + givens[j].c() * alpha; b[j] = temp; } } } // end namespace internal } // end namespace Eigen RcppEigen/inst/include/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h0000644000176200001440000005316512253717461031215 0ustar liggesusers// -*- coding: utf-8 // vim: set fileencoding=utf-8 // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Thomas Capricelli // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
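// A minimal usage sketch of the solver defined below. It is illustrative only: the
// functor name, the model y = a*exp(b*t) and the synthetic data are made up, and the
// block is kept inside '#if 0' so that it does not take part in compilation.
#if 0
#include <unsupported/Eigen/NonLinearOptimization>
#include <Eigen/Dense>
#include <cmath>
#include <iostream>

// hypothetical least-squares functor for illustration
struct ExpFitFunctor {
    Eigen::VectorXd t, y;                               // observed data
    // residuals r_i = y_i - a*exp(b*t_i), with p = (a, b)
    int operator()(const Eigen::VectorXd &p, Eigen::VectorXd &fvec) const {
        for (int i = 0; i < t.size(); ++i)
            fvec[i] = y[i] - p[0] * std::exp(p[1] * t[i]);
        return 0;                                       // a negative value would abort (UserAsked)
    }
    // analytic Jacobian of the residuals, one row per observation
    int df(const Eigen::VectorXd &p, Eigen::MatrixXd &fjac) const {
        for (int i = 0; i < t.size(); ++i) {
            fjac(i, 0) = -std::exp(p[1] * t[i]);
            fjac(i, 1) = -p[0] * t[i] * std::exp(p[1] * t[i]);
        }
        return 0;
    }
    int values() const { return static_cast<int>(t.size()); }
};

int exampleLevenbergMarquardt() {
    ExpFitFunctor f;
    f.t = Eigen::VectorXd::LinSpaced(20, 0.0, 1.0);
    f.y = (2.0 * (1.5 * f.t.array()).exp()).matrix();   // noise-free data with a = 2, b = 1.5
    Eigen::VectorXd p(2);
    p << 1.0, 1.0;                                      // starting guess
    Eigen::LevenbergMarquardt<ExpFitFunctor> lm(f);
    Eigen::LevenbergMarquardtSpace::Status status = lm.minimize(p);
    std::cout << "status = " << status
              << ", iterations = " << lm.iter
              << ", p = " << p.transpose() << std::endl;
    return 0;
}
#endif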
#ifndef EIGEN_LEVENBERGMARQUARDT__H #define EIGEN_LEVENBERGMARQUARDT__H namespace Eigen { namespace LevenbergMarquardtSpace { enum Status { NotStarted = -2, Running = -1, ImproperInputParameters = 0, RelativeReductionTooSmall = 1, RelativeErrorTooSmall = 2, RelativeErrorAndReductionTooSmall = 3, CosinusTooSmall = 4, TooManyFunctionEvaluation = 5, FtolTooSmall = 6, XtolTooSmall = 7, GtolTooSmall = 8, UserAsked = 9 }; } /** * \ingroup NonLinearOptimization_Module * \brief Performs non linear optimization over a non-linear function, * using a variant of the Levenberg Marquardt algorithm. * * Check wikipedia for more information. * http://en.wikipedia.org/wiki/Levenberg%E2%80%93Marquardt_algorithm */ template class LevenbergMarquardt { public: LevenbergMarquardt(FunctorType &_functor) : functor(_functor) { nfev = njev = iter = 0; fnorm = gnorm = 0.; useExternalScaling=false; } typedef DenseIndex Index; struct Parameters { Parameters() : factor(Scalar(100.)) , maxfev(400) , ftol(std::sqrt(NumTraits::epsilon())) , xtol(std::sqrt(NumTraits::epsilon())) , gtol(Scalar(0.)) , epsfcn(Scalar(0.)) {} Scalar factor; Index maxfev; // maximum number of function evaluation Scalar ftol; Scalar xtol; Scalar gtol; Scalar epsfcn; }; typedef Matrix< Scalar, Dynamic, 1 > FVectorType; typedef Matrix< Scalar, Dynamic, Dynamic > JacobianType; LevenbergMarquardtSpace::Status lmder1( FVectorType &x, const Scalar tol = std::sqrt(NumTraits::epsilon()) ); LevenbergMarquardtSpace::Status minimize(FVectorType &x); LevenbergMarquardtSpace::Status minimizeInit(FVectorType &x); LevenbergMarquardtSpace::Status minimizeOneStep(FVectorType &x); static LevenbergMarquardtSpace::Status lmdif1( FunctorType &functor, FVectorType &x, Index *nfev, const Scalar tol = std::sqrt(NumTraits::epsilon()) ); LevenbergMarquardtSpace::Status lmstr1( FVectorType &x, const Scalar tol = std::sqrt(NumTraits::epsilon()) ); LevenbergMarquardtSpace::Status minimizeOptimumStorage(FVectorType &x); LevenbergMarquardtSpace::Status minimizeOptimumStorageInit(FVectorType &x); LevenbergMarquardtSpace::Status minimizeOptimumStorageOneStep(FVectorType &x); void resetParameters(void) { parameters = Parameters(); } Parameters parameters; FVectorType fvec, qtf, diag; JacobianType fjac; PermutationMatrix permutation; Index nfev; Index njev; Index iter; Scalar fnorm, gnorm; bool useExternalScaling; Scalar lm_param(void) { return par; } private: FunctorType &functor; Index n; Index m; FVectorType wa1, wa2, wa3, wa4; Scalar par, sum; Scalar temp, temp1, temp2; Scalar delta; Scalar ratio; Scalar pnorm, xnorm, fnorm1, actred, dirder, prered; LevenbergMarquardt& operator=(const LevenbergMarquardt&); }; template LevenbergMarquardtSpace::Status LevenbergMarquardt::lmder1( FVectorType &x, const Scalar tol ) { n = x.size(); m = functor.values(); /* check the input parameters for errors. */ if (n <= 0 || m < n || tol < 0.) 
return LevenbergMarquardtSpace::ImproperInputParameters; resetParameters(); parameters.ftol = tol; parameters.xtol = tol; parameters.maxfev = 100*(n+1); return minimize(x); } template LevenbergMarquardtSpace::Status LevenbergMarquardt::minimize(FVectorType &x) { LevenbergMarquardtSpace::Status status = minimizeInit(x); if (status==LevenbergMarquardtSpace::ImproperInputParameters) return status; do { status = minimizeOneStep(x); } while (status==LevenbergMarquardtSpace::Running); return status; } template LevenbergMarquardtSpace::Status LevenbergMarquardt::minimizeInit(FVectorType &x) { n = x.size(); m = functor.values(); wa1.resize(n); wa2.resize(n); wa3.resize(n); wa4.resize(m); fvec.resize(m); fjac.resize(m, n); if (!useExternalScaling) diag.resize(n); eigen_assert( (!useExternalScaling || diag.size()==n) || "When useExternalScaling is set, the caller must provide a valid 'diag'"); qtf.resize(n); /* Function Body */ nfev = 0; njev = 0; /* check the input parameters for errors. */ if (n <= 0 || m < n || parameters.ftol < 0. || parameters.xtol < 0. || parameters.gtol < 0. || parameters.maxfev <= 0 || parameters.factor <= 0.) return LevenbergMarquardtSpace::ImproperInputParameters; if (useExternalScaling) for (Index j = 0; j < n; ++j) if (diag[j] <= 0.) return LevenbergMarquardtSpace::ImproperInputParameters; /* evaluate the function at the starting point */ /* and calculate its norm. */ nfev = 1; if ( functor(x, fvec) < 0) return LevenbergMarquardtSpace::UserAsked; fnorm = fvec.stableNorm(); /* initialize levenberg-marquardt parameter and iteration counter. */ par = 0.; iter = 1; return LevenbergMarquardtSpace::NotStarted; } template LevenbergMarquardtSpace::Status LevenbergMarquardt::minimizeOneStep(FVectorType &x) { using std::abs; using std::sqrt; eigen_assert(x.size()==n); // check the caller is not cheating us /* calculate the jacobian matrix. */ Index df_ret = functor.df(x, fjac); if (df_ret<0) return LevenbergMarquardtSpace::UserAsked; if (df_ret>0) // numerical diff, we evaluated the function df_ret times nfev += df_ret; else njev++; /* compute the qr factorization of the jacobian. */ wa2 = fjac.colwise().blueNorm(); ColPivHouseholderQR qrfac(fjac); fjac = qrfac.matrixQR(); permutation = qrfac.colsPermutation(); /* on the first iteration and if external scaling is not used, scale according */ /* to the norms of the columns of the initial jacobian. */ if (iter == 1) { if (!useExternalScaling) for (Index j = 0; j < n; ++j) diag[j] = (wa2[j]==0.)? 1. : wa2[j]; /* on the first iteration, calculate the norm of the scaled x */ /* and initialize the step bound delta. */ xnorm = diag.cwiseProduct(x).stableNorm(); delta = parameters.factor * xnorm; if (delta == 0.) delta = parameters.factor; } /* form (q transpose)*fvec and store the first n components in */ /* qtf. */ wa4 = fvec; wa4.applyOnTheLeft(qrfac.householderQ().adjoint()); qtf = wa4.head(n); /* compute the norm of the scaled gradient. */ gnorm = 0.; if (fnorm != 0.) for (Index j = 0; j < n; ++j) if (wa2[permutation.indices()[j]] != 0.) gnorm = (std::max)(gnorm, abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]])); /* test for convergence of the gradient norm. */ if (gnorm <= parameters.gtol) return LevenbergMarquardtSpace::CosinusTooSmall; /* rescale if necessary. */ if (!useExternalScaling) diag = diag.cwiseMax(wa2); do { /* determine the levenberg-marquardt parameter. */ internal::lmpar2(qrfac, diag, qtf, delta, par, wa1); /* store the direction p and x + p. calculate the norm of p. 
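       (lmpar2 above has chosen the damping parameter par for the current trust-region
       radius delta and returned the corresponding damped gauss-newton step in wa1;
       negating it yields the actual step p, and pnorm measures that step in the
       diag-scaled norm used for the trust region.)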
*/ wa1 = -wa1; wa2 = x + wa1; pnorm = diag.cwiseProduct(wa1).stableNorm(); /* on the first iteration, adjust the initial step bound. */ if (iter == 1) delta = (std::min)(delta,pnorm); /* evaluate the function at x + p and calculate its norm. */ if ( functor(wa2, wa4) < 0) return LevenbergMarquardtSpace::UserAsked; ++nfev; fnorm1 = wa4.stableNorm(); /* compute the scaled actual reduction. */ actred = -1.; if (Scalar(.1) * fnorm1 < fnorm) actred = 1. - numext::abs2(fnorm1 / fnorm); /* compute the scaled predicted reduction and */ /* the scaled directional derivative. */ wa3 = fjac.template triangularView() * (qrfac.colsPermutation().inverse() *wa1); temp1 = numext::abs2(wa3.stableNorm() / fnorm); temp2 = numext::abs2(sqrt(par) * pnorm / fnorm); prered = temp1 + temp2 / Scalar(.5); dirder = -(temp1 + temp2); /* compute the ratio of the actual to the predicted */ /* reduction. */ ratio = 0.; if (prered != 0.) ratio = actred / prered; /* update the step bound. */ if (ratio <= Scalar(.25)) { if (actred >= 0.) temp = Scalar(.5); if (actred < 0.) temp = Scalar(.5) * dirder / (dirder + Scalar(.5) * actred); if (Scalar(.1) * fnorm1 >= fnorm || temp < Scalar(.1)) temp = Scalar(.1); /* Computing MIN */ delta = temp * (std::min)(delta, pnorm / Scalar(.1)); par /= temp; } else if (!(par != 0. && ratio < Scalar(.75))) { delta = pnorm / Scalar(.5); par = Scalar(.5) * par; } /* test for successful iteration. */ if (ratio >= Scalar(1e-4)) { /* successful iteration. update x, fvec, and their norms. */ x = wa2; wa2 = diag.cwiseProduct(x); fvec = wa4; xnorm = wa2.stableNorm(); fnorm = fnorm1; ++iter; } /* tests for convergence. */ if (abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1. && delta <= parameters.xtol * xnorm) return LevenbergMarquardtSpace::RelativeErrorAndReductionTooSmall; if (abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1.) return LevenbergMarquardtSpace::RelativeReductionTooSmall; if (delta <= parameters.xtol * xnorm) return LevenbergMarquardtSpace::RelativeErrorTooSmall; /* tests for termination and stringent tolerances. */ if (nfev >= parameters.maxfev) return LevenbergMarquardtSpace::TooManyFunctionEvaluation; if (abs(actred) <= NumTraits::epsilon() && prered <= NumTraits::epsilon() && Scalar(.5) * ratio <= 1.) return LevenbergMarquardtSpace::FtolTooSmall; if (delta <= NumTraits::epsilon() * xnorm) return LevenbergMarquardtSpace::XtolTooSmall; if (gnorm <= NumTraits::epsilon()) return LevenbergMarquardtSpace::GtolTooSmall; } while (ratio < Scalar(1e-4)); return LevenbergMarquardtSpace::Running; } template LevenbergMarquardtSpace::Status LevenbergMarquardt::lmstr1( FVectorType &x, const Scalar tol ) { n = x.size(); m = functor.values(); /* check the input parameters for errors. */ if (n <= 0 || m < n || tol < 0.) return LevenbergMarquardtSpace::ImproperInputParameters; resetParameters(); parameters.ftol = tol; parameters.xtol = tol; parameters.maxfev = 100*(n+1); return minimizeOptimumStorage(x); } template LevenbergMarquardtSpace::Status LevenbergMarquardt::minimizeOptimumStorageInit(FVectorType &x) { n = x.size(); m = functor.values(); wa1.resize(n); wa2.resize(n); wa3.resize(n); wa4.resize(m); fvec.resize(m); // Only R is stored in fjac. Q is only used to compute 'qtf', which is // Q.transpose()*rhs. qtf will be updated using givens rotation, // instead of storing them in Q. 
// The purpose it to only use a nxn matrix, instead of mxn here, so // that we can handle cases where m>>n : fjac.resize(n, n); if (!useExternalScaling) diag.resize(n); eigen_assert( (!useExternalScaling || diag.size()==n) || "When useExternalScaling is set, the caller must provide a valid 'diag'"); qtf.resize(n); /* Function Body */ nfev = 0; njev = 0; /* check the input parameters for errors. */ if (n <= 0 || m < n || parameters.ftol < 0. || parameters.xtol < 0. || parameters.gtol < 0. || parameters.maxfev <= 0 || parameters.factor <= 0.) return LevenbergMarquardtSpace::ImproperInputParameters; if (useExternalScaling) for (Index j = 0; j < n; ++j) if (diag[j] <= 0.) return LevenbergMarquardtSpace::ImproperInputParameters; /* evaluate the function at the starting point */ /* and calculate its norm. */ nfev = 1; if ( functor(x, fvec) < 0) return LevenbergMarquardtSpace::UserAsked; fnorm = fvec.stableNorm(); /* initialize levenberg-marquardt parameter and iteration counter. */ par = 0.; iter = 1; return LevenbergMarquardtSpace::NotStarted; } template LevenbergMarquardtSpace::Status LevenbergMarquardt::minimizeOptimumStorageOneStep(FVectorType &x) { using std::abs; using std::sqrt; eigen_assert(x.size()==n); // check the caller is not cheating us Index i, j; bool sing; /* compute the qr factorization of the jacobian matrix */ /* calculated one row at a time, while simultaneously */ /* forming (q transpose)*fvec and storing the first */ /* n components in qtf. */ qtf.fill(0.); fjac.fill(0.); Index rownb = 2; for (i = 0; i < m; ++i) { if (functor.df(x, wa3, rownb) < 0) return LevenbergMarquardtSpace::UserAsked; internal::rwupdt(fjac, wa3, qtf, fvec[i]); ++rownb; } ++njev; /* if the jacobian is rank deficient, call qrfac to */ /* reorder its columns and update the components of qtf. */ sing = false; for (j = 0; j < n; ++j) { if (fjac(j,j) == 0.) sing = true; wa2[j] = fjac.col(j).head(j).stableNorm(); } permutation.setIdentity(n); if (sing) { wa2 = fjac.colwise().blueNorm(); // TODO We have no unit test covering this code path, do not modify // until it is carefully tested ColPivHouseholderQR qrfac(fjac); fjac = qrfac.matrixQR(); wa1 = fjac.diagonal(); fjac.diagonal() = qrfac.hCoeffs(); permutation = qrfac.colsPermutation(); // TODO : avoid this: for(Index ii=0; ii< fjac.cols(); ii++) fjac.col(ii).segment(ii+1, fjac.rows()-ii-1) *= fjac(ii,ii); // rescale vectors for (j = 0; j < n; ++j) { if (fjac(j,j) != 0.) { sum = 0.; for (i = j; i < n; ++i) sum += fjac(i,j) * qtf[i]; temp = -sum / fjac(j,j); for (i = j; i < n; ++i) qtf[i] += fjac(i,j) * temp; } fjac(j,j) = wa1[j]; } } /* on the first iteration and if external scaling is not used, scale according */ /* to the norms of the columns of the initial jacobian. */ if (iter == 1) { if (!useExternalScaling) for (j = 0; j < n; ++j) diag[j] = (wa2[j]==0.)? 1. : wa2[j]; /* on the first iteration, calculate the norm of the scaled x */ /* and initialize the step bound delta. */ xnorm = diag.cwiseProduct(x).stableNorm(); delta = parameters.factor * xnorm; if (delta == 0.) delta = parameters.factor; } /* compute the norm of the scaled gradient. */ gnorm = 0.; if (fnorm != 0.) for (j = 0; j < n; ++j) if (wa2[permutation.indices()[j]] != 0.) gnorm = (std::max)(gnorm, abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]])); /* test for convergence of the gradient norm. */ if (gnorm <= parameters.gtol) return LevenbergMarquardtSpace::CosinusTooSmall; /* rescale if necessary. 
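       (the scaling diag is monotone: each entry is replaced by the larger of its
       current value and the corresponding column-norm estimate held in wa2.)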
*/ if (!useExternalScaling) diag = diag.cwiseMax(wa2); do { /* determine the levenberg-marquardt parameter. */ internal::lmpar(fjac, permutation.indices(), diag, qtf, delta, par, wa1); /* store the direction p and x + p. calculate the norm of p. */ wa1 = -wa1; wa2 = x + wa1; pnorm = diag.cwiseProduct(wa1).stableNorm(); /* on the first iteration, adjust the initial step bound. */ if (iter == 1) delta = (std::min)(delta,pnorm); /* evaluate the function at x + p and calculate its norm. */ if ( functor(wa2, wa4) < 0) return LevenbergMarquardtSpace::UserAsked; ++nfev; fnorm1 = wa4.stableNorm(); /* compute the scaled actual reduction. */ actred = -1.; if (Scalar(.1) * fnorm1 < fnorm) actred = 1. - numext::abs2(fnorm1 / fnorm); /* compute the scaled predicted reduction and */ /* the scaled directional derivative. */ wa3 = fjac.topLeftCorner(n,n).template triangularView() * (permutation.inverse() * wa1); temp1 = numext::abs2(wa3.stableNorm() / fnorm); temp2 = numext::abs2(sqrt(par) * pnorm / fnorm); prered = temp1 + temp2 / Scalar(.5); dirder = -(temp1 + temp2); /* compute the ratio of the actual to the predicted */ /* reduction. */ ratio = 0.; if (prered != 0.) ratio = actred / prered; /* update the step bound. */ if (ratio <= Scalar(.25)) { if (actred >= 0.) temp = Scalar(.5); if (actred < 0.) temp = Scalar(.5) * dirder / (dirder + Scalar(.5) * actred); if (Scalar(.1) * fnorm1 >= fnorm || temp < Scalar(.1)) temp = Scalar(.1); /* Computing MIN */ delta = temp * (std::min)(delta, pnorm / Scalar(.1)); par /= temp; } else if (!(par != 0. && ratio < Scalar(.75))) { delta = pnorm / Scalar(.5); par = Scalar(.5) * par; } /* test for successful iteration. */ if (ratio >= Scalar(1e-4)) { /* successful iteration. update x, fvec, and their norms. */ x = wa2; wa2 = diag.cwiseProduct(x); fvec = wa4; xnorm = wa2.stableNorm(); fnorm = fnorm1; ++iter; } /* tests for convergence. */ if (abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1. && delta <= parameters.xtol * xnorm) return LevenbergMarquardtSpace::RelativeErrorAndReductionTooSmall; if (abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1.) return LevenbergMarquardtSpace::RelativeReductionTooSmall; if (delta <= parameters.xtol * xnorm) return LevenbergMarquardtSpace::RelativeErrorTooSmall; /* tests for termination and stringent tolerances. */ if (nfev >= parameters.maxfev) return LevenbergMarquardtSpace::TooManyFunctionEvaluation; if (abs(actred) <= NumTraits::epsilon() && prered <= NumTraits::epsilon() && Scalar(.5) * ratio <= 1.) return LevenbergMarquardtSpace::FtolTooSmall; if (delta <= NumTraits::epsilon() * xnorm) return LevenbergMarquardtSpace::XtolTooSmall; if (gnorm <= NumTraits::epsilon()) return LevenbergMarquardtSpace::GtolTooSmall; } while (ratio < Scalar(1e-4)); return LevenbergMarquardtSpace::Running; } template LevenbergMarquardtSpace::Status LevenbergMarquardt::minimizeOptimumStorage(FVectorType &x) { LevenbergMarquardtSpace::Status status = minimizeOptimumStorageInit(x); if (status==LevenbergMarquardtSpace::ImproperInputParameters) return status; do { status = minimizeOptimumStorageOneStep(x); } while (status==LevenbergMarquardtSpace::Running); return status; } template LevenbergMarquardtSpace::Status LevenbergMarquardt::lmdif1( FunctorType &functor, FVectorType &x, Index *nfev, const Scalar tol ) { Index n = x.size(); Index m = functor.values(); /* check the input parameters for errors. */ if (n <= 0 || m < n || tol < 0.) 
return LevenbergMarquardtSpace::ImproperInputParameters; NumericalDiff numDiff(functor); // embedded LevenbergMarquardt LevenbergMarquardt, Scalar > lm(numDiff); lm.parameters.ftol = tol; lm.parameters.xtol = tol; lm.parameters.maxfev = 200*(n+1); LevenbergMarquardtSpace::Status info = LevenbergMarquardtSpace::Status(lm.minimize(x)); if (nfev) * nfev = lm.nfev; return info; } } // end namespace Eigen #endif // EIGEN_LEVENBERGMARQUARDT__H //vim: ai ts=4 sts=4 et sw=4 RcppEigen/inst/include/unsupported/Eigen/src/NonLinearOptimization/chkder.h0000644000176200001440000000351012253717461026650 0ustar liggesusers#define chkder_log10e 0.43429448190325182765 #define chkder_factor 100. namespace Eigen { namespace internal { template void chkder( const Matrix< Scalar, Dynamic, 1 > &x, const Matrix< Scalar, Dynamic, 1 > &fvec, const Matrix< Scalar, Dynamic, Dynamic > &fjac, Matrix< Scalar, Dynamic, 1 > &xp, const Matrix< Scalar, Dynamic, 1 > &fvecp, int mode, Matrix< Scalar, Dynamic, 1 > &err ) { using std::sqrt; using std::abs; using std::log; typedef DenseIndex Index; const Scalar eps = sqrt(NumTraits::epsilon()); const Scalar epsf = chkder_factor * NumTraits::epsilon(); const Scalar epslog = chkder_log10e * log(eps); Scalar temp; const Index m = fvec.size(), n = x.size(); if (mode != 2) { /* mode = 1. */ xp.resize(n); for (Index j = 0; j < n; ++j) { temp = eps * abs(x[j]); if (temp == 0.) temp = eps; xp[j] = x[j] + temp; } } else { /* mode = 2. */ err.setZero(m); for (Index j = 0; j < n; ++j) { temp = abs(x[j]); if (temp == 0.) temp = 1.; err += temp * fjac.col(j); } for (Index i = 0; i < m; ++i) { temp = 1.; if (fvec[i] != 0. && fvecp[i] != 0. && abs(fvecp[i] - fvec[i]) >= epsf * abs(fvec[i])) temp = eps * abs((fvecp[i] - fvec[i]) / eps - err[i]) / (abs(fvec[i]) + abs(fvecp[i])); err[i] = 1.; if (temp > NumTraits::epsilon() && temp < eps) err[i] = (chkder_log10e * log(temp) - epslog) / epslog; if (temp >= eps) err[i] = 0.; } } } } // end namespace internal } // end namespace Eigen RcppEigen/inst/include/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h0000644000176200001440000000627712253717461026753 0ustar liggesusersnamespace Eigen { namespace internal { // TODO : once qrsolv2 is removed, use ColPivHouseholderQR or PermutationMatrix instead of ipvt template void qrsolv( Matrix< Scalar, Dynamic, Dynamic > &s, // TODO : use a PermutationMatrix once lmpar is no more: const VectorXi &ipvt, const Matrix< Scalar, Dynamic, 1 > &diag, const Matrix< Scalar, Dynamic, 1 > &qtb, Matrix< Scalar, Dynamic, 1 > &x, Matrix< Scalar, Dynamic, 1 > &sdiag) { typedef DenseIndex Index; /* Local variables */ Index i, j, k, l; Scalar temp; Index n = s.cols(); Matrix< Scalar, Dynamic, 1 > wa(n); JacobiRotation givens; /* Function Body */ // the following will only change the lower triangular part of s, including // the diagonal, though the diagonal is restored afterward /* copy r and (q transpose)*b to preserve input and initialize s. */ /* in particular, save the diagonal elements of r in x. */ x = s.diagonal(); wa = qtb; s.topLeftCorner(n,n).template triangularView() = s.topLeftCorner(n,n).transpose(); /* eliminate the diagonal matrix d using a givens rotation. */ for (j = 0; j < n; ++j) { /* prepare the row of d to be eliminated, locating the */ /* diagonal element using p from the qr factorization. */ l = ipvt[j]; if (diag[l] == 0.) 
break; sdiag.tail(n-j).setZero(); sdiag[j] = diag[l]; /* the transformations to eliminate the row of d */ /* modify only a single element of (q transpose)*b */ /* beyond the first n, which is initially zero. */ Scalar qtbpj = 0.; for (k = j; k < n; ++k) { /* determine a givens rotation which eliminates the */ /* appropriate element in the current row of d. */ givens.makeGivens(-s(k,k), sdiag[k]); /* compute the modified diagonal element of r and */ /* the modified element of ((q transpose)*b,0). */ s(k,k) = givens.c() * s(k,k) + givens.s() * sdiag[k]; temp = givens.c() * wa[k] + givens.s() * qtbpj; qtbpj = -givens.s() * wa[k] + givens.c() * qtbpj; wa[k] = temp; /* accumulate the tranformation in the row of s. */ for (i = k+1; i().solveInPlace(wa.head(nsing)); // restore sdiag = s.diagonal(); s.diagonal() = x; /* permute the components of z back to components of x. */ for (j = 0; j < n; ++j) x[ipvt[j]] = wa[j]; } } // end namespace internal } // end namespace Eigen RcppEigen/inst/include/unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h0000644000176200001440000000207112253717461026642 0ustar liggesusersnamespace Eigen { namespace internal { // TODO : move this to GivensQR once there's such a thing in Eigen template void r1mpyq(DenseIndex m, DenseIndex n, Scalar *a, const std::vector > &v_givens, const std::vector > &w_givens) { typedef DenseIndex Index; /* apply the first set of givens rotations to a. */ for (Index j = n-2; j>=0; --j) for (Index i = 0; i void dogleg( const Matrix< Scalar, Dynamic, Dynamic > &qrfac, const Matrix< Scalar, Dynamic, 1 > &diag, const Matrix< Scalar, Dynamic, 1 > &qtb, Scalar delta, Matrix< Scalar, Dynamic, 1 > &x) { using std::abs; using std::sqrt; typedef DenseIndex Index; /* Local variables */ Index i, j; Scalar sum, temp, alpha, bnorm; Scalar gnorm, qnorm; Scalar sgnorm; /* Function Body */ const Scalar epsmch = NumTraits::epsilon(); const Index n = qrfac.cols(); eigen_assert(n==qtb.size()); eigen_assert(n==x.size()); eigen_assert(n==diag.size()); Matrix< Scalar, Dynamic, 1 > wa1(n), wa2(n); /* first, calculate the gauss-newton direction. */ for (j = n-1; j >=0; --j) { temp = qrfac(j,j); if (temp == 0.) { temp = epsmch * qrfac.col(j).head(j+1).maxCoeff(); if (temp == 0.) temp = epsmch; } if (j==n-1) x[j] = qtb[j] / temp; else x[j] = (qtb[j] - qrfac.row(j).tail(n-j-1).dot(x.tail(n-j-1))) / temp; } /* test whether the gauss-newton direction is acceptable. */ qnorm = diag.cwiseProduct(x).stableNorm(); if (qnorm <= delta) return; // TODO : this path is not tested by Eigen unit tests /* the gauss-newton direction is not acceptable. */ /* next, calculate the scaled gradient direction. */ wa1.fill(0.); for (j = 0; j < n; ++j) { wa1.tail(n-j) += qrfac.row(j).tail(n-j) * qtb[j]; wa1[j] /= diag[j]; } /* calculate the norm of the scaled gradient and test for */ /* the special case in which the scaled gradient is zero. */ gnorm = wa1.stableNorm(); sgnorm = 0.; alpha = delta / qnorm; if (gnorm == 0.) goto algo_end; /* calculate the point along the scaled gradient */ /* at which the quadratic is minimized. */ wa1.array() /= (diag*gnorm).array(); // TODO : once unit tests cover this part,: // wa2 = qrfac.template triangularView() * wa1; for (j = 0; j < n; ++j) { sum = 0.; for (i = j; i < n; ++i) { sum += qrfac(j,i) * wa1[i]; } wa2[j] = sum; } temp = wa2.stableNorm(); sgnorm = gnorm / temp / temp; /* test whether the scaled gradient direction is acceptable. */ alpha = 0.; if (sgnorm >= delta) goto algo_end; /* the scaled gradient direction is not acceptable. 
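       (neither the full gauss-newton step nor the cauchy point along the scaled
       gradient fits inside the trust region, so the code below picks the point where
       the dogleg path from the cauchy point towards the gauss-newton step crosses the
       boundary of radius delta.)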
*/ /* finally, calculate the point along the dogleg */ /* at which the quadratic is minimized. */ bnorm = qtb.stableNorm(); temp = bnorm / gnorm * (bnorm / qnorm) * (sgnorm / delta); temp = temp - delta / qnorm * numext::abs2(sgnorm / delta) + sqrt(numext::abs2(temp - delta / qnorm) + (1.-numext::abs2(delta / qnorm)) * (1.-numext::abs2(sgnorm / delta))); alpha = delta / qnorm * (1. - numext::abs2(sgnorm / delta)) / temp; algo_end: /* form appropriate convex combination of the gauss-newton */ /* direction and the scaled gradient direction. */ temp = (1.-alpha) * (std::min)(sgnorm,delta); x = temp * wa1 + alpha * x; } } // end namespace internal } // end namespace Eigen RcppEigen/inst/include/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h0000644000176200001440000004656412253717461031652 0ustar liggesusers// -*- coding: utf-8 // vim: set fileencoding=utf-8 // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Thomas Capricelli // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_HYBRIDNONLINEARSOLVER_H #define EIGEN_HYBRIDNONLINEARSOLVER_H namespace Eigen { namespace HybridNonLinearSolverSpace { enum Status { Running = -1, ImproperInputParameters = 0, RelativeErrorTooSmall = 1, TooManyFunctionEvaluation = 2, TolTooSmall = 3, NotMakingProgressJacobian = 4, NotMakingProgressIterations = 5, UserAsked = 6 }; } /** * \ingroup NonLinearOptimization_Module * \brief Finds a zero of a system of n * nonlinear functions in n variables by a modification of the Powell * hybrid method ("dogleg"). * * The user must provide a subroutine which calculates the * functions. The Jacobian is either provided by the user, or approximated * using a forward-difference method. 
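  *
  * A minimal usage sketch (the functor name, the system and the starting point below
  * are illustrative, not part of the library):
  * \code
  * struct MySystem {
  *   // residuals of the 2x2 system x0^2 + x1 = 11, x0 + x1^2 = 7
  *   int operator()(const Eigen::VectorXd &x, Eigen::VectorXd &fvec) const {
  *     fvec(0) = x(0)*x(0) + x(1) - 11.0;
  *     fvec(1) = x(0) + x(1)*x(1) - 7.0;
  *     return 0;                     // a negative return value aborts the solver
  *   }
  * };
  *
  * MySystem sys;
  * Eigen::VectorXd x(2);
  * x << 1.0, 1.0;                    // starting guess; a root lies near (3, 2)
  * Eigen::HybridNonLinearSolver<MySystem> solver(sys);
  * // hybrd1() approximates the Jacobian by forward differences internally
  * Eigen::HybridNonLinearSolverSpace::Status st = solver.hybrd1(x);
  * \endcode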
* */ template class HybridNonLinearSolver { public: typedef DenseIndex Index; HybridNonLinearSolver(FunctorType &_functor) : functor(_functor) { nfev=njev=iter = 0; fnorm= 0.; useExternalScaling=false;} struct Parameters { Parameters() : factor(Scalar(100.)) , maxfev(1000) , xtol(std::sqrt(NumTraits::epsilon())) , nb_of_subdiagonals(-1) , nb_of_superdiagonals(-1) , epsfcn(Scalar(0.)) {} Scalar factor; Index maxfev; // maximum number of function evaluation Scalar xtol; Index nb_of_subdiagonals; Index nb_of_superdiagonals; Scalar epsfcn; }; typedef Matrix< Scalar, Dynamic, 1 > FVectorType; typedef Matrix< Scalar, Dynamic, Dynamic > JacobianType; /* TODO: if eigen provides a triangular storage, use it here */ typedef Matrix< Scalar, Dynamic, Dynamic > UpperTriangularType; HybridNonLinearSolverSpace::Status hybrj1( FVectorType &x, const Scalar tol = std::sqrt(NumTraits::epsilon()) ); HybridNonLinearSolverSpace::Status solveInit(FVectorType &x); HybridNonLinearSolverSpace::Status solveOneStep(FVectorType &x); HybridNonLinearSolverSpace::Status solve(FVectorType &x); HybridNonLinearSolverSpace::Status hybrd1( FVectorType &x, const Scalar tol = std::sqrt(NumTraits::epsilon()) ); HybridNonLinearSolverSpace::Status solveNumericalDiffInit(FVectorType &x); HybridNonLinearSolverSpace::Status solveNumericalDiffOneStep(FVectorType &x); HybridNonLinearSolverSpace::Status solveNumericalDiff(FVectorType &x); void resetParameters(void) { parameters = Parameters(); } Parameters parameters; FVectorType fvec, qtf, diag; JacobianType fjac; UpperTriangularType R; Index nfev; Index njev; Index iter; Scalar fnorm; bool useExternalScaling; private: FunctorType &functor; Index n; Scalar sum; bool sing; Scalar temp; Scalar delta; bool jeval; Index ncsuc; Scalar ratio; Scalar pnorm, xnorm, fnorm1; Index nslow1, nslow2; Index ncfail; Scalar actred, prered; FVectorType wa1, wa2, wa3, wa4; HybridNonLinearSolver& operator=(const HybridNonLinearSolver&); }; template HybridNonLinearSolverSpace::Status HybridNonLinearSolver::hybrj1( FVectorType &x, const Scalar tol ) { n = x.size(); /* check the input parameters for errors. */ if (n <= 0 || tol < 0.) return HybridNonLinearSolverSpace::ImproperInputParameters; resetParameters(); parameters.maxfev = 100*(n+1); parameters.xtol = tol; diag.setConstant(n, 1.); useExternalScaling = true; return solve(x); } template HybridNonLinearSolverSpace::Status HybridNonLinearSolver::solveInit(FVectorType &x) { n = x.size(); wa1.resize(n); wa2.resize(n); wa3.resize(n); wa4.resize(n); fvec.resize(n); qtf.resize(n); fjac.resize(n, n); if (!useExternalScaling) diag.resize(n); eigen_assert( (!useExternalScaling || diag.size()==n) || "When useExternalScaling is set, the caller must provide a valid 'diag'"); /* Function Body */ nfev = 0; njev = 0; /* check the input parameters for errors. */ if (n <= 0 || parameters.xtol < 0. || parameters.maxfev <= 0 || parameters.factor <= 0. ) return HybridNonLinearSolverSpace::ImproperInputParameters; if (useExternalScaling) for (Index j = 0; j < n; ++j) if (diag[j] <= 0.) return HybridNonLinearSolverSpace::ImproperInputParameters; /* evaluate the function at the starting point */ /* and calculate its norm. */ nfev = 1; if ( functor(x, fvec) < 0) return HybridNonLinearSolverSpace::UserAsked; fnorm = fvec.stableNorm(); /* initialize iteration counter and monitors. 
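       (iter counts outer iterations; ncsuc and ncfail count consecutive successful and
       failed trust-region steps and drive the expansion or shrinking of delta, with two
       consecutive failures forcing a fresh jacobian; nslow1 and nslow2 count iterations
       and jacobian evaluations that make too little progress and trigger the
       NotMakingProgress return codes.)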
*/ iter = 1; ncsuc = 0; ncfail = 0; nslow1 = 0; nslow2 = 0; return HybridNonLinearSolverSpace::Running; } template HybridNonLinearSolverSpace::Status HybridNonLinearSolver::solveOneStep(FVectorType &x) { using std::abs; eigen_assert(x.size()==n); // check the caller is not cheating us Index j; std::vector > v_givens(n), w_givens(n); jeval = true; /* calculate the jacobian matrix. */ if ( functor.df(x, fjac) < 0) return HybridNonLinearSolverSpace::UserAsked; ++njev; wa2 = fjac.colwise().blueNorm(); /* on the first iteration and if external scaling is not used, scale according */ /* to the norms of the columns of the initial jacobian. */ if (iter == 1) { if (!useExternalScaling) for (j = 0; j < n; ++j) diag[j] = (wa2[j]==0.) ? 1. : wa2[j]; /* on the first iteration, calculate the norm of the scaled x */ /* and initialize the step bound delta. */ xnorm = diag.cwiseProduct(x).stableNorm(); delta = parameters.factor * xnorm; if (delta == 0.) delta = parameters.factor; } /* compute the qr factorization of the jacobian. */ HouseholderQR qrfac(fjac); // no pivoting: /* copy the triangular factor of the qr factorization into r. */ R = qrfac.matrixQR(); /* accumulate the orthogonal factor in fjac. */ fjac = qrfac.householderQ(); /* form (q transpose)*fvec and store in qtf. */ qtf = fjac.transpose() * fvec; /* rescale if necessary. */ if (!useExternalScaling) diag = diag.cwiseMax(wa2); while (true) { /* determine the direction p. */ internal::dogleg(R, diag, qtf, delta, wa1); /* store the direction p and x + p. calculate the norm of p. */ wa1 = -wa1; wa2 = x + wa1; pnorm = diag.cwiseProduct(wa1).stableNorm(); /* on the first iteration, adjust the initial step bound. */ if (iter == 1) delta = (std::min)(delta,pnorm); /* evaluate the function at x + p and calculate its norm. */ if ( functor(wa2, wa4) < 0) return HybridNonLinearSolverSpace::UserAsked; ++nfev; fnorm1 = wa4.stableNorm(); /* compute the scaled actual reduction. */ actred = -1.; if (fnorm1 < fnorm) /* Computing 2nd power */ actred = 1. - numext::abs2(fnorm1 / fnorm); /* compute the scaled predicted reduction. */ wa3 = R.template triangularView()*wa1 + qtf; temp = wa3.stableNorm(); prered = 0.; if (temp < fnorm) /* Computing 2nd power */ prered = 1. - numext::abs2(temp / fnorm); /* compute the ratio of the actual to the predicted reduction. */ ratio = 0.; if (prered > 0.) ratio = actred / prered; /* update the step bound. */ if (ratio < Scalar(.1)) { ncsuc = 0; ++ncfail; delta = Scalar(.5) * delta; } else { ncfail = 0; ++ncsuc; if (ratio >= Scalar(.5) || ncsuc > 1) delta = (std::max)(delta, pnorm / Scalar(.5)); if (abs(ratio - 1.) <= Scalar(.1)) { delta = pnorm / Scalar(.5); } } /* test for successful iteration. */ if (ratio >= Scalar(1e-4)) { /* successful iteration. update x, fvec, and their norms. */ x = wa2; wa2 = diag.cwiseProduct(x); fvec = wa4; xnorm = wa2.stableNorm(); fnorm = fnorm1; ++iter; } /* determine the progress of the iteration. */ ++nslow1; if (actred >= Scalar(.001)) nslow1 = 0; if (jeval) ++nslow2; if (actred >= Scalar(.1)) nslow2 = 0; /* test for convergence. */ if (delta <= parameters.xtol * xnorm || fnorm == 0.) return HybridNonLinearSolverSpace::RelativeErrorTooSmall; /* tests for termination and stringent tolerances. 
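       (in addition to the xtol test just above, the checks below stop when the budget
       of function evaluations maxfev is spent, when the requested tolerance cannot be
       met at machine precision, or when five consecutive jacobian evaluations or ten
       consecutive iterations fail to reduce the residual appreciably.)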
*/ if (nfev >= parameters.maxfev) return HybridNonLinearSolverSpace::TooManyFunctionEvaluation; if (Scalar(.1) * (std::max)(Scalar(.1) * delta, pnorm) <= NumTraits::epsilon() * xnorm) return HybridNonLinearSolverSpace::TolTooSmall; if (nslow2 == 5) return HybridNonLinearSolverSpace::NotMakingProgressJacobian; if (nslow1 == 10) return HybridNonLinearSolverSpace::NotMakingProgressIterations; /* criterion for recalculating jacobian. */ if (ncfail == 2) break; // leave inner loop and go for the next outer loop iteration /* calculate the rank one modification to the jacobian */ /* and update qtf if necessary. */ wa1 = diag.cwiseProduct( diag.cwiseProduct(wa1)/pnorm ); wa2 = fjac.transpose() * wa4; if (ratio >= Scalar(1e-4)) qtf = wa2; wa2 = (wa2-wa3)/pnorm; /* compute the qr factorization of the updated jacobian. */ internal::r1updt(R, wa1, v_givens, w_givens, wa2, wa3, &sing); internal::r1mpyq(n, n, fjac.data(), v_givens, w_givens); internal::r1mpyq(1, n, qtf.data(), v_givens, w_givens); jeval = false; } return HybridNonLinearSolverSpace::Running; } template HybridNonLinearSolverSpace::Status HybridNonLinearSolver::solve(FVectorType &x) { HybridNonLinearSolverSpace::Status status = solveInit(x); if (status==HybridNonLinearSolverSpace::ImproperInputParameters) return status; while (status==HybridNonLinearSolverSpace::Running) status = solveOneStep(x); return status; } template HybridNonLinearSolverSpace::Status HybridNonLinearSolver::hybrd1( FVectorType &x, const Scalar tol ) { n = x.size(); /* check the input parameters for errors. */ if (n <= 0 || tol < 0.) return HybridNonLinearSolverSpace::ImproperInputParameters; resetParameters(); parameters.maxfev = 200*(n+1); parameters.xtol = tol; diag.setConstant(n, 1.); useExternalScaling = true; return solveNumericalDiff(x); } template HybridNonLinearSolverSpace::Status HybridNonLinearSolver::solveNumericalDiffInit(FVectorType &x) { n = x.size(); if (parameters.nb_of_subdiagonals<0) parameters.nb_of_subdiagonals= n-1; if (parameters.nb_of_superdiagonals<0) parameters.nb_of_superdiagonals= n-1; wa1.resize(n); wa2.resize(n); wa3.resize(n); wa4.resize(n); qtf.resize(n); fjac.resize(n, n); fvec.resize(n); if (!useExternalScaling) diag.resize(n); eigen_assert( (!useExternalScaling || diag.size()==n) || "When useExternalScaling is set, the caller must provide a valid 'diag'"); /* Function Body */ nfev = 0; njev = 0; /* check the input parameters for errors. */ if (n <= 0 || parameters.xtol < 0. || parameters.maxfev <= 0 || parameters.nb_of_subdiagonals< 0 || parameters.nb_of_superdiagonals< 0 || parameters.factor <= 0. ) return HybridNonLinearSolverSpace::ImproperInputParameters; if (useExternalScaling) for (Index j = 0; j < n; ++j) if (diag[j] <= 0.) return HybridNonLinearSolverSpace::ImproperInputParameters; /* evaluate the function at the starting point */ /* and calculate its norm. */ nfev = 1; if ( functor(x, fvec) < 0) return HybridNonLinearSolverSpace::UserAsked; fnorm = fvec.stableNorm(); /* initialize iteration counter and monitors. 
*/ iter = 1; ncsuc = 0; ncfail = 0; nslow1 = 0; nslow2 = 0; return HybridNonLinearSolverSpace::Running; } template HybridNonLinearSolverSpace::Status HybridNonLinearSolver::solveNumericalDiffOneStep(FVectorType &x) { using std::sqrt; using std::abs; assert(x.size()==n); // check the caller is not cheating us Index j; std::vector > v_givens(n), w_givens(n); jeval = true; if (parameters.nb_of_subdiagonals<0) parameters.nb_of_subdiagonals= n-1; if (parameters.nb_of_superdiagonals<0) parameters.nb_of_superdiagonals= n-1; /* calculate the jacobian matrix. */ if (internal::fdjac1(functor, x, fvec, fjac, parameters.nb_of_subdiagonals, parameters.nb_of_superdiagonals, parameters.epsfcn) <0) return HybridNonLinearSolverSpace::UserAsked; nfev += (std::min)(parameters.nb_of_subdiagonals+parameters.nb_of_superdiagonals+ 1, n); wa2 = fjac.colwise().blueNorm(); /* on the first iteration and if external scaling is not used, scale according */ /* to the norms of the columns of the initial jacobian. */ if (iter == 1) { if (!useExternalScaling) for (j = 0; j < n; ++j) diag[j] = (wa2[j]==0.) ? 1. : wa2[j]; /* on the first iteration, calculate the norm of the scaled x */ /* and initialize the step bound delta. */ xnorm = diag.cwiseProduct(x).stableNorm(); delta = parameters.factor * xnorm; if (delta == 0.) delta = parameters.factor; } /* compute the qr factorization of the jacobian. */ HouseholderQR qrfac(fjac); // no pivoting: /* copy the triangular factor of the qr factorization into r. */ R = qrfac.matrixQR(); /* accumulate the orthogonal factor in fjac. */ fjac = qrfac.householderQ(); /* form (q transpose)*fvec and store in qtf. */ qtf = fjac.transpose() * fvec; /* rescale if necessary. */ if (!useExternalScaling) diag = diag.cwiseMax(wa2); while (true) { /* determine the direction p. */ internal::dogleg(R, diag, qtf, delta, wa1); /* store the direction p and x + p. calculate the norm of p. */ wa1 = -wa1; wa2 = x + wa1; pnorm = diag.cwiseProduct(wa1).stableNorm(); /* on the first iteration, adjust the initial step bound. */ if (iter == 1) delta = (std::min)(delta,pnorm); /* evaluate the function at x + p and calculate its norm. */ if ( functor(wa2, wa4) < 0) return HybridNonLinearSolverSpace::UserAsked; ++nfev; fnorm1 = wa4.stableNorm(); /* compute the scaled actual reduction. */ actred = -1.; if (fnorm1 < fnorm) /* Computing 2nd power */ actred = 1. - numext::abs2(fnorm1 / fnorm); /* compute the scaled predicted reduction. */ wa3 = R.template triangularView()*wa1 + qtf; temp = wa3.stableNorm(); prered = 0.; if (temp < fnorm) /* Computing 2nd power */ prered = 1. - numext::abs2(temp / fnorm); /* compute the ratio of the actual to the predicted reduction. */ ratio = 0.; if (prered > 0.) ratio = actred / prered; /* update the step bound. */ if (ratio < Scalar(.1)) { ncsuc = 0; ++ncfail; delta = Scalar(.5) * delta; } else { ncfail = 0; ++ncsuc; if (ratio >= Scalar(.5) || ncsuc > 1) delta = (std::max)(delta, pnorm / Scalar(.5)); if (abs(ratio - 1.) <= Scalar(.1)) { delta = pnorm / Scalar(.5); } } /* test for successful iteration. */ if (ratio >= Scalar(1e-4)) { /* successful iteration. update x, fvec, and their norms. */ x = wa2; wa2 = diag.cwiseProduct(x); fvec = wa4; xnorm = wa2.stableNorm(); fnorm = fnorm1; ++iter; } /* determine the progress of the iteration. */ ++nslow1; if (actred >= Scalar(.001)) nslow1 = 0; if (jeval) ++nslow2; if (actred >= Scalar(.1)) nslow2 = 0; /* test for convergence. */ if (delta <= parameters.xtol * xnorm || fnorm == 0.) 
return HybridNonLinearSolverSpace::RelativeErrorTooSmall; /* tests for termination and stringent tolerances. */ if (nfev >= parameters.maxfev) return HybridNonLinearSolverSpace::TooManyFunctionEvaluation; if (Scalar(.1) * (std::max)(Scalar(.1) * delta, pnorm) <= NumTraits::epsilon() * xnorm) return HybridNonLinearSolverSpace::TolTooSmall; if (nslow2 == 5) return HybridNonLinearSolverSpace::NotMakingProgressJacobian; if (nslow1 == 10) return HybridNonLinearSolverSpace::NotMakingProgressIterations; /* criterion for recalculating jacobian. */ if (ncfail == 2) break; // leave inner loop and go for the next outer loop iteration /* calculate the rank one modification to the jacobian */ /* and update qtf if necessary. */ wa1 = diag.cwiseProduct( diag.cwiseProduct(wa1)/pnorm ); wa2 = fjac.transpose() * wa4; if (ratio >= Scalar(1e-4)) qtf = wa2; wa2 = (wa2-wa3)/pnorm; /* compute the qr factorization of the updated jacobian. */ internal::r1updt(R, wa1, v_givens, w_givens, wa2, wa3, &sing); internal::r1mpyq(n, n, fjac.data(), v_givens, w_givens); internal::r1mpyq(1, n, qtf.data(), v_givens, w_givens); jeval = false; } return HybridNonLinearSolverSpace::Running; } template HybridNonLinearSolverSpace::Status HybridNonLinearSolver::solveNumericalDiff(FVectorType &x) { HybridNonLinearSolverSpace::Status status = solveNumericalDiffInit(x); if (status==HybridNonLinearSolverSpace::ImproperInputParameters) return status; while (status==HybridNonLinearSolverSpace::Running) status = solveNumericalDiffOneStep(x); return status; } } // end namespace Eigen #endif // EIGEN_HYBRIDNONLINEARSOLVER_H //vim: ai ts=4 sts=4 et sw=4 RcppEigen/inst/include/unsupported/Eigen/src/NonLinearOptimization/r1updt.h0000644000176200001440000000601212253717461026627 0ustar liggesusersnamespace Eigen { namespace internal { template void r1updt( Matrix< Scalar, Dynamic, Dynamic > &s, const Matrix< Scalar, Dynamic, 1> &u, std::vector > &v_givens, std::vector > &w_givens, Matrix< Scalar, Dynamic, 1> &v, Matrix< Scalar, Dynamic, 1> &w, bool *sing) { typedef DenseIndex Index; const JacobiRotation IdentityRotation = JacobiRotation(1,0); /* Local variables */ const Index m = s.rows(); const Index n = s.cols(); Index i, j=1; Scalar temp; JacobiRotation givens; // r1updt had a broader usecase, but we dont use it here. And, more // importantly, we can not test it. eigen_assert(m==n); eigen_assert(u.size()==m); eigen_assert(v.size()==n); eigen_assert(w.size()==n); /* move the nontrivial part of the last column of s into w. */ w[n-1] = s(n-1,n-1); /* rotate the vector v into a multiple of the n-th unit vector */ /* in such a way that a spike is introduced into w. */ for (j=n-2; j>=0; --j) { w[j] = 0.; if (v[j] != 0.) { /* determine a givens rotation which eliminates the */ /* j-th element of v. */ givens.makeGivens(-v[n-1], v[j]); /* apply the transformation to v and store the information */ /* necessary to recover the givens rotation. */ v[n-1] = givens.s() * v[j] + givens.c() * v[n-1]; v_givens[j] = givens; /* apply the transformation to s and extend the spike in w. */ for (i = j; i < m; ++i) { temp = givens.c() * s(j,i) - givens.s() * w[i]; w[i] = givens.s() * s(j,i) + givens.c() * w[i]; s(j,i) = temp; } } else v_givens[j] = IdentityRotation; } /* add the spike from the rank 1 update to w. */ w += v[n-1] * u; /* eliminate the spike. */ *sing = false; for (j = 0; j < n-1; ++j) { if (w[j] != 0.) { /* determine a givens rotation which eliminates the */ /* j-th element of the spike. 
*/ givens.makeGivens(-s(j,j), w[j]); /* apply the transformation to s and reduce the spike in w. */ for (i = j; i < m; ++i) { temp = givens.c() * s(j,i) + givens.s() * w[i]; w[i] = -givens.s() * s(j,i) + givens.c() * w[i]; s(j,i) = temp; } /* store the information necessary to recover the */ /* givens rotation. */ w_givens[j] = givens; } else v_givens[j] = IdentityRotation; /* test for zero diagonal elements in the output s. */ if (s(j,j) == 0.) { *sing = true; } } /* move w back into the last column of the output s. */ s(n-1,n-1) = w[n-1]; if (s(j,j) == 0.) { *sing = true; } return; } } // end namespace internal } // end namespace Eigen RcppEigen/inst/include/unsupported/Eigen/src/NonLinearOptimization/lmpar.h0000644000176200001440000002162712253717461026534 0ustar liggesusersnamespace Eigen { namespace internal { template void lmpar( Matrix< Scalar, Dynamic, Dynamic > &r, const VectorXi &ipvt, const Matrix< Scalar, Dynamic, 1 > &diag, const Matrix< Scalar, Dynamic, 1 > &qtb, Scalar delta, Scalar &par, Matrix< Scalar, Dynamic, 1 > &x) { using std::abs; using std::sqrt; typedef DenseIndex Index; /* Local variables */ Index i, j, l; Scalar fp; Scalar parc, parl; Index iter; Scalar temp, paru; Scalar gnorm; Scalar dxnorm; /* Function Body */ const Scalar dwarf = (std::numeric_limits::min)(); const Index n = r.cols(); eigen_assert(n==diag.size()); eigen_assert(n==qtb.size()); eigen_assert(n==x.size()); Matrix< Scalar, Dynamic, 1 > wa1, wa2; /* compute and store in x the gauss-newton direction. if the */ /* jacobian is rank-deficient, obtain a least squares solution. */ Index nsing = n-1; wa1 = qtb; for (j = 0; j < n; ++j) { if (r(j,j) == 0. && nsing == n-1) nsing = j - 1; if (nsing < n-1) wa1[j] = 0.; } for (j = nsing; j>=0; --j) { wa1[j] /= r(j,j); temp = wa1[j]; for (i = 0; i < j ; ++i) wa1[i] -= r(i,j) * temp; } for (j = 0; j < n; ++j) x[ipvt[j]] = wa1[j]; /* initialize the iteration counter. */ /* evaluate the function at the origin, and test */ /* for acceptance of the gauss-newton direction. */ iter = 0; wa2 = diag.cwiseProduct(x); dxnorm = wa2.blueNorm(); fp = dxnorm - delta; if (fp <= Scalar(0.1) * delta) { par = 0; return; } /* if the jacobian is not rank deficient, the newton */ /* step provides a lower bound, parl, for the zero of */ /* the function. otherwise set this bound to zero. */ parl = 0.; if (nsing >= n-1) { for (j = 0; j < n; ++j) { l = ipvt[j]; wa1[j] = diag[l] * (wa2[l] / dxnorm); } // it's actually a triangularView.solveInplace(), though in a weird // way: for (j = 0; j < n; ++j) { Scalar sum = 0.; for (i = 0; i < j; ++i) sum += r(i,j) * wa1[i]; wa1[j] = (wa1[j] - sum) / r(j,j); } temp = wa1.blueNorm(); parl = fp / delta / temp / temp; } /* calculate an upper bound, paru, for the zero of the function. */ for (j = 0; j < n; ++j) wa1[j] = r.col(j).head(j+1).dot(qtb.head(j+1)) / diag[ipvt[j]]; gnorm = wa1.stableNorm(); paru = gnorm / delta; if (paru == 0.) paru = dwarf / (std::min)(delta,Scalar(0.1)); /* if the input par lies outside of the interval (parl,paru), */ /* set par to the closer endpoint. */ par = (std::max)(par,parl); par = (std::min)(par,paru); if (par == 0.) par = gnorm / dxnorm; /* beginning of an iteration. */ while (true) { ++iter; /* evaluate the function at the current value of par. */ if (par == 0.) 
par = (std::max)(dwarf,Scalar(.001) * paru); /* Computing MAX */ wa1 = sqrt(par)* diag; Matrix< Scalar, Dynamic, 1 > sdiag(n); qrsolv(r, ipvt, wa1, qtb, x, sdiag); wa2 = diag.cwiseProduct(x); dxnorm = wa2.blueNorm(); temp = fp; fp = dxnorm - delta; /* if the function is small enough, accept the current value */ /* of par. also test for the exceptional cases where parl */ /* is zero or the number of iterations has reached 10. */ if (abs(fp) <= Scalar(0.1) * delta || (parl == 0. && fp <= temp && temp < 0.) || iter == 10) break; /* compute the newton correction. */ for (j = 0; j < n; ++j) { l = ipvt[j]; wa1[j] = diag[l] * (wa2[l] / dxnorm); } for (j = 0; j < n; ++j) { wa1[j] /= sdiag[j]; temp = wa1[j]; for (i = j+1; i < n; ++i) wa1[i] -= r(i,j) * temp; } temp = wa1.blueNorm(); parc = fp / delta / temp / temp; /* depending on the sign of the function, update parl or paru. */ if (fp > 0.) parl = (std::max)(parl,par); if (fp < 0.) paru = (std::min)(paru,par); /* compute an improved estimate for par. */ /* Computing MAX */ par = (std::max)(parl,par+parc); /* end of an iteration. */ } /* termination. */ if (iter == 0) par = 0.; return; } template void lmpar2( const ColPivHouseholderQR > &qr, const Matrix< Scalar, Dynamic, 1 > &diag, const Matrix< Scalar, Dynamic, 1 > &qtb, Scalar delta, Scalar &par, Matrix< Scalar, Dynamic, 1 > &x) { using std::sqrt; using std::abs; typedef DenseIndex Index; /* Local variables */ Index j; Scalar fp; Scalar parc, parl; Index iter; Scalar temp, paru; Scalar gnorm; Scalar dxnorm; /* Function Body */ const Scalar dwarf = (std::numeric_limits::min)(); const Index n = qr.matrixQR().cols(); eigen_assert(n==diag.size()); eigen_assert(n==qtb.size()); Matrix< Scalar, Dynamic, 1 > wa1, wa2; /* compute and store in x the gauss-newton direction. if the */ /* jacobian is rank-deficient, obtain a least squares solution. */ // const Index rank = qr.nonzeroPivots(); // exactly double(0.) const Index rank = qr.rank(); // use a threshold wa1 = qtb; wa1.tail(n-rank).setZero(); qr.matrixQR().topLeftCorner(rank, rank).template triangularView().solveInPlace(wa1.head(rank)); x = qr.colsPermutation()*wa1; /* initialize the iteration counter. */ /* evaluate the function at the origin, and test */ /* for acceptance of the gauss-newton direction. */ iter = 0; wa2 = diag.cwiseProduct(x); dxnorm = wa2.blueNorm(); fp = dxnorm - delta; if (fp <= Scalar(0.1) * delta) { par = 0; return; } /* if the jacobian is not rank deficient, the newton */ /* step provides a lower bound, parl, for the zero of */ /* the function. otherwise set this bound to zero. */ parl = 0.; if (rank==n) { wa1 = qr.colsPermutation().inverse() * diag.cwiseProduct(wa2)/dxnorm; qr.matrixQR().topLeftCorner(n, n).transpose().template triangularView().solveInPlace(wa1); temp = wa1.blueNorm(); parl = fp / delta / temp / temp; } /* calculate an upper bound, paru, for the zero of the function. */ for (j = 0; j < n; ++j) wa1[j] = qr.matrixQR().col(j).head(j+1).dot(qtb.head(j+1)) / diag[qr.colsPermutation().indices()(j)]; gnorm = wa1.stableNorm(); paru = gnorm / delta; if (paru == 0.) paru = dwarf / (std::min)(delta,Scalar(0.1)); /* if the input par lies outside of the interval (parl,paru), */ /* set par to the closer endpoint. */ par = (std::max)(par,parl); par = (std::min)(par,paru); if (par == 0.) par = gnorm / dxnorm; /* beginning of an iteration. */ Matrix< Scalar, Dynamic, Dynamic > s = qr.matrixQR(); while (true) { ++iter; /* evaluate the function at the current value of par. */ if (par == 0.) 
par = (std::max)(dwarf,Scalar(.001) * paru); /* Computing MAX */ wa1 = sqrt(par)* diag; Matrix< Scalar, Dynamic, 1 > sdiag(n); qrsolv(s, qr.colsPermutation().indices(), wa1, qtb, x, sdiag); wa2 = diag.cwiseProduct(x); dxnorm = wa2.blueNorm(); temp = fp; fp = dxnorm - delta; /* if the function is small enough, accept the current value */ /* of par. also test for the exceptional cases where parl */ /* is zero or the number of iterations has reached 10. */ if (abs(fp) <= Scalar(0.1) * delta || (parl == 0. && fp <= temp && temp < 0.) || iter == 10) break; /* compute the newton correction. */ wa1 = qr.colsPermutation().inverse() * diag.cwiseProduct(wa2/dxnorm); // we could almost use this here, but the diagonal is outside qr, in sdiag[] // qr.matrixQR().topLeftCorner(n, n).transpose().template triangularView().solveInPlace(wa1); for (j = 0; j < n; ++j) { wa1[j] /= sdiag[j]; temp = wa1[j]; for (Index i = j+1; i < n; ++i) wa1[i] -= s(i,j) * temp; } temp = wa1.blueNorm(); parc = fp / delta / temp / temp; /* depending on the sign of the function, update parl or paru. */ if (fp > 0.) parl = (std::max)(parl,par); if (fp < 0.) paru = (std::min)(paru,par); /* compute an improved estimate for par. */ par = (std::max)(parl,par+parc); } if (iter == 0) par = 0.; return; } } // end namespace internal } // end namespace Eigen RcppEigen/inst/include/unsupported/Eigen/src/NonLinearOptimization/covar.h0000644000176200001440000000357312253717461026533 0ustar liggesusersnamespace Eigen { namespace internal { template void covar( Matrix< Scalar, Dynamic, Dynamic > &r, const VectorXi &ipvt, Scalar tol = std::sqrt(NumTraits::epsilon()) ) { using std::abs; typedef DenseIndex Index; /* Local variables */ Index i, j, k, l, ii, jj; bool sing; Scalar temp; /* Function Body */ const Index n = r.cols(); const Scalar tolr = tol * abs(r(0,0)); Matrix< Scalar, Dynamic, 1 > wa(n); eigen_assert(ipvt.size()==n); /* form the inverse of r in the full upper triangle of r. */ l = -1; for (k = 0; k < n; ++k) if (abs(r(k,k)) > tolr) { r(k,k) = 1. / r(k,k); for (j = 0; j <= k-1; ++j) { temp = r(k,k) * r(j,k); r(j,k) = 0.; r.col(k).head(j+1) -= r.col(j).head(j+1) * temp; } l = k; } /* form the full upper triangle of the inverse of (r transpose)*r */ /* in the full upper triangle of r. */ for (k = 0; k <= l; ++k) { for (j = 0; j <= k-1; ++j) r.col(j).head(j+1) += r.col(k).head(j+1) * r(j,k); r.col(k).head(k+1) *= r(k,k); } /* form the full lower triangle of the covariance matrix */ /* in the strict lower triangle of r and in wa. */ for (j = 0; j < n; ++j) { jj = ipvt[j]; sing = j > l; for (i = 0; i <= j; ++i) { if (sing) r(i,j) = 0.; ii = ipvt[i]; if (ii > jj) r(ii,jj) = r(i,j); if (ii < jj) r(jj,ii) = r(i,j); } wa[jj] = r(j,j); } /* symmetrize the covariance matrix in r. 
*/ r.topLeftCorner(n,n).template triangularView() = r.topLeftCorner(n,n).transpose(); r.diagonal() = wa; } } // end namespace internal } // end namespace Eigen RcppEigen/inst/include/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h0000644000176200001440000000426112253717461026544 0ustar liggesusersnamespace Eigen { namespace internal { template DenseIndex fdjac1( const FunctorType &Functor, Matrix< Scalar, Dynamic, 1 > &x, Matrix< Scalar, Dynamic, 1 > &fvec, Matrix< Scalar, Dynamic, Dynamic > &fjac, DenseIndex ml, DenseIndex mu, Scalar epsfcn) { using std::sqrt; using std::abs; typedef DenseIndex Index; /* Local variables */ Scalar h; Index j, k; Scalar eps, temp; Index msum; int iflag; Index start, length; /* Function Body */ const Scalar epsmch = NumTraits::epsilon(); const Index n = x.size(); eigen_assert(fvec.size()==n); Matrix< Scalar, Dynamic, 1 > wa1(n); Matrix< Scalar, Dynamic, 1 > wa2(n); eps = sqrt((std::max)(epsfcn,epsmch)); msum = ml + mu + 1; if (msum >= n) { /* computation of dense approximate jacobian. */ for (j = 0; j < n; ++j) { temp = x[j]; h = eps * abs(temp); if (h == 0.) h = eps; x[j] = temp + h; iflag = Functor(x, wa1); if (iflag < 0) return iflag; x[j] = temp; fjac.col(j) = (wa1-fvec)/h; } }else { /* computation of banded approximate jacobian. */ for (k = 0; k < msum; ++k) { for (j = k; (msum<0) ? (j>n): (jn): (j(0,j-mu); length = (std::min)(n-1, j+ml) - start + 1; fjac.col(j).segment(start, length) = ( wa1.segment(start, length)-fvec.segment(start, length))/h; } } } return 0; } } // end namespace internal } // end namespace Eigen RcppEigen/inst/include/unsupported/Eigen/src/Polynomials/0000755000176200001440000000000012253717461023252 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/src/Polynomials/Companion.h0000644000176200001440000001710112253717461025346 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010 Manuel Yguel // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_COMPANION_H #define EIGEN_COMPANION_H // This file requires the user to include // * Eigen/Core // * Eigen/src/PolynomialSolver.h namespace Eigen { namespace internal { #ifndef EIGEN_PARSED_BY_DOXYGEN template T radix(){ return 2; } template T radix2(){ return radix()*radix(); } template struct decrement_if_fixed_size { enum { ret = (Size == Dynamic) ? Dynamic : Size-1 }; }; #endif template< typename _Scalar, int _Deg > class companion { public: EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Deg==Dynamic ? 
Dynamic : _Deg) enum { Deg = _Deg, Deg_1=decrement_if_fixed_size::ret }; typedef _Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef Matrix RightColumn; //typedef DiagonalMatrix< Scalar, Deg_1, Deg_1 > BottomLeftDiagonal; typedef Matrix BottomLeftDiagonal; typedef Matrix DenseCompanionMatrixType; typedef Matrix< Scalar, _Deg, Deg_1 > LeftBlock; typedef Matrix< Scalar, Deg_1, Deg_1 > BottomLeftBlock; typedef Matrix< Scalar, 1, Deg_1 > LeftBlockFirstRow; typedef DenseIndex Index; public: EIGEN_STRONG_INLINE const _Scalar operator()(Index row, Index col ) const { if( m_bl_diag.rows() > col ) { if( 0 < row ){ return m_bl_diag[col]; } else{ return 0; } } else{ return m_monic[row]; } } public: template void setPolynomial( const VectorType& poly ) { const Index deg = poly.size()-1; m_monic = -1/poly[deg] * poly.head(deg); //m_bl_diag.setIdentity( deg-1 ); m_bl_diag.setOnes(deg-1); } template companion( const VectorType& poly ){ setPolynomial( poly ); } public: DenseCompanionMatrixType denseMatrix() const { const Index deg = m_monic.size(); const Index deg_1 = deg-1; DenseCompanionMatrixType companion(deg,deg); companion << ( LeftBlock(deg,deg_1) << LeftBlockFirstRow::Zero(1,deg_1), BottomLeftBlock::Identity(deg-1,deg-1)*m_bl_diag.asDiagonal() ).finished() , m_monic; return companion; } protected: /** Helper function for the balancing algorithm. * \returns true if the row and the column, having colNorm and rowNorm * as norms, are balanced, false otherwise. * colB and rowB are repectively the multipliers for * the column and the row in order to balance them. * */ bool balanced( Scalar colNorm, Scalar rowNorm, bool& isBalanced, Scalar& colB, Scalar& rowB ); /** Helper function for the balancing algorithm. * \returns true if the row and the column, having colNorm and rowNorm * as norms, are balanced, false otherwise. * colB and rowB are repectively the multipliers for * the column and the row in order to balance them. * */ bool balancedR( Scalar colNorm, Scalar rowNorm, bool& isBalanced, Scalar& colB, Scalar& rowB ); public: /** * Balancing algorithm from B. N. PARLETT and C. REINSCH (1969) * "Balancing a matrix for calculation of eigenvalues and eigenvectors" * adapted to the case of companion matrices. * A matrix with non zero row and non zero column is balanced * for a certain norm if the i-th row and the i-th column * have same norm for all i. 
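   *
   * Illustrative example (added here, not from the original documentation): with
   * radix 2, a row of norm 100 paired with a column of norm 1 gives
   * \f$ 2^{5} < 100 \le 2^{7} \f$, i.e. \f$ \sigma = 3 \f$, so the row is scaled
   * by \f$ 1/2^{3} \f$ and the column by \f$ 2^{3} \f$, leaving the two norms at
   * 12.5 and 8 instead of 100 and 1.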
*/ void balance(); protected: RightColumn m_monic; BottomLeftDiagonal m_bl_diag; }; template< typename _Scalar, int _Deg > inline bool companion<_Scalar,_Deg>::balanced( Scalar colNorm, Scalar rowNorm, bool& isBalanced, Scalar& colB, Scalar& rowB ) { if( Scalar(0) == colNorm || Scalar(0) == rowNorm ){ return true; } else { //To find the balancing coefficients, if the radix is 2, //one finds \f$ \sigma \f$ such that // \f$ 2^{2\sigma-1} < rowNorm / colNorm \le 2^{2\sigma+1} \f$ // then the balancing coefficient for the row is \f$ 1/2^{\sigma} \f$ // and the balancing coefficient for the column is \f$ 2^{\sigma} \f$ rowB = rowNorm / radix(); colB = Scalar(1); const Scalar s = colNorm + rowNorm; while (colNorm < rowB) { colB *= radix(); colNorm *= radix2(); } rowB = rowNorm * radix(); while (colNorm >= rowB) { colB /= radix(); colNorm /= radix2(); } //This line is used to avoid insubstantial balancing if ((rowNorm + colNorm) < Scalar(0.95) * s * colB) { isBalanced = false; rowB = Scalar(1) / colB; return false; } else{ return true; } } } template< typename _Scalar, int _Deg > inline bool companion<_Scalar,_Deg>::balancedR( Scalar colNorm, Scalar rowNorm, bool& isBalanced, Scalar& colB, Scalar& rowB ) { if( Scalar(0) == colNorm || Scalar(0) == rowNorm ){ return true; } else { /** * Set the norm of the column and the row to the geometric mean * of the row and column norm */ const _Scalar q = colNorm/rowNorm; if( !isApprox( q, _Scalar(1) ) ) { rowB = sqrt( colNorm/rowNorm ); colB = Scalar(1)/rowB; isBalanced = false; return false; } else{ return true; } } } template< typename _Scalar, int _Deg > void companion<_Scalar,_Deg>::balance() { using std::abs; EIGEN_STATIC_ASSERT( Deg == Dynamic || 1 < Deg, YOU_MADE_A_PROGRAMMING_MISTAKE ); const Index deg = m_monic.size(); const Index deg_1 = deg-1; bool hasConverged=false; while( !hasConverged ) { hasConverged = true; Scalar colNorm,rowNorm; Scalar colB,rowB; //First row, first column excluding the diagonal //============================================== colNorm = abs(m_bl_diag[0]); rowNorm = abs(m_monic[0]); //Compute balancing of the row and the column if( !balanced( colNorm, rowNorm, hasConverged, colB, rowB ) ) { m_bl_diag[0] *= colB; m_monic[0] *= rowB; } //Middle rows and columns excluding the diagonal //============================================== for( Index i=1; i headMonic( m_monic, 0, deg_1 ); colNorm = headMonic.array().abs().sum(); rowNorm = abs( m_bl_diag[ebl] ); //Compute balancing of the row and the column if( !balanced( colNorm, rowNorm, hasConverged, colB, rowB ) ) { headMonic *= colB; m_bl_diag[ebl] *= rowB; } } } } // end namespace internal } // end namespace Eigen #endif // EIGEN_COMPANION_H RcppEigen/inst/include/unsupported/Eigen/src/Polynomials/PolynomialUtils.h0000644000176200001440000001137312253717461026574 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010 Manuel Yguel // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_POLYNOMIAL_UTILS_H #define EIGEN_POLYNOMIAL_UTILS_H namespace Eigen { /** \ingroup Polynomials_Module * \returns the evaluation of the polynomial at x using Horner algorithm. * * \param[in] poly : the vector of coefficients of the polynomial ordered * by degrees i.e. poly[i] is the coefficient of degree i of the polynomial * e.g. 
\f$ 1 + 3x^2 \f$ is stored as a vector \f$ [ 1, 0, 3 ] \f$.
  * \param[in] x : the value to evaluate the polynomial at.
  *
  * Note for stability:
  *  \f$ |x| \le 1 \f$
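  *
  * A minimal usage sketch added for illustration (not part of the upstream
  * documentation); it only assumes the unsupported Polynomials header has been
  * included:
  * \code
  * #include <unsupported/Eigen/Polynomials>
  * Eigen::Vector3d coeffs(1.0, 0.0, 3.0);            // represents 1 + 3x^2
  * double v = Eigen::poly_eval_horner(coeffs, 0.5);  // 1 + 3 * 0.25 = 1.75
  * \endcode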
*/ template inline T poly_eval_horner( const Polynomials& poly, const T& x ) { T val=poly[poly.size()-1]; for(DenseIndex i=poly.size()-2; i>=0; --i ){ val = val*x + poly[i]; } return val; } /** \ingroup Polynomials_Module * \returns the evaluation of the polynomial at x using stabilized Horner algorithm. * * \param[in] poly : the vector of coefficients of the polynomial ordered * by degrees i.e. poly[i] is the coefficient of degree i of the polynomial * e.g. \f$ 1 + 3x^2 \f$ is stored as a vector \f$ [ 1, 0, 3 ] \f$. * \param[in] x : the value to evaluate the polynomial at. */ template inline T poly_eval( const Polynomials& poly, const T& x ) { typedef typename NumTraits::Real Real; if( numext::abs2( x ) <= Real(1) ){ return poly_eval_horner( poly, x ); } else { T val=poly[0]; T inv_x = T(1)/x; for( DenseIndex i=1; iPrecondition:
  *
  *  the leading coefficient of the input polynomial poly must be non zero
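  *
  * A small illustrative sketch (added here; not part of the upstream docs):
  * \code
  * Eigen::Vector3d coeffs(-2.0, 0.0, 1.0);       // x^2 - 2, roots +/- sqrt(2)
  * double ub = Eigen::cauchy_max_bound(coeffs);  // 1 + sum_i |c_i / c_n| = 3 here
  * // every root r satisfies |r| <= ub, and indeed sqrt(2) ~ 1.41 <= 3
  * \endcode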
*/ template inline typename NumTraits::Real cauchy_max_bound( const Polynomial& poly ) { using std::abs; typedef typename Polynomial::Scalar Scalar; typedef typename NumTraits::Real Real; eigen_assert( Scalar(0) != poly[poly.size()-1] ); const Scalar inv_leading_coeff = Scalar(1)/poly[poly.size()-1]; Real cb(0); for( DenseIndex i=0; i inline typename NumTraits::Real cauchy_min_bound( const Polynomial& poly ) { using std::abs; typedef typename Polynomial::Scalar Scalar; typedef typename NumTraits::Real Real; DenseIndex i=0; while( i void roots_to_monicPolynomial( const RootVector& rv, Polynomial& poly ) { typedef typename Polynomial::Scalar Scalar; poly.setZero( rv.size()+1 ); poly[0] = -rv[0]; poly[1] = Scalar(1); for( DenseIndex i=1; i< rv.size(); ++i ) { for( DenseIndex j=i+1; j>0; --j ){ poly[j] = poly[j-1] - rv[i]*poly[j]; } poly[0] = -rv[i]*poly[0]; } } } // end namespace Eigen #endif // EIGEN_POLYNOMIAL_UTILS_H RcppEigen/inst/include/unsupported/Eigen/src/Polynomials/PolynomialSolver.h0000644000176200001440000003323512253717461026747 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010 Manuel Yguel // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_POLYNOMIAL_SOLVER_H #define EIGEN_POLYNOMIAL_SOLVER_H namespace Eigen { /** \ingroup Polynomials_Module * \class PolynomialSolverBase. * * \brief Defined to be inherited by polynomial solvers: it provides * convenient methods such as * - real roots, * - greatest, smallest complex roots, * - real roots with greatest, smallest absolute real value, * - greatest, smallest real roots. * * It stores the set of roots as a vector of complexes. * */ template< typename _Scalar, int _Deg > class PolynomialSolverBase { public: EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Deg==Dynamic ? Dynamic : _Deg) typedef _Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef std::complex RootType; typedef Matrix RootsType; typedef DenseIndex Index; protected: template< typename OtherPolynomial > inline void setPolynomial( const OtherPolynomial& poly ){ m_roots.resize(poly.size()); } public: template< typename OtherPolynomial > inline PolynomialSolverBase( const OtherPolynomial& poly ){ setPolynomial( poly() ); } inline PolynomialSolverBase(){} public: /** \returns the complex roots of the polynomial */ inline const RootsType& roots() const { return m_roots; } public: /** Clear and fills the back insertion sequence with the real roots of the polynomial * i.e. the real part of the complex roots that have an imaginary part which * absolute value is smaller than absImaginaryThreshold. * absImaginaryThreshold takes the dummy_precision associated * with the _Scalar template parameter of the PolynomialSolver class as the default value. * * \param[out] bi_seq : the back insertion sequence (stl concept) * \param[in] absImaginaryThreshold : the maximum bound of the imaginary part of a complex * number that is considered as real. 
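      *
      * A hedged usage sketch (illustration only; the coefficient values below are
      * arbitrary examples, not from the upstream documentation):
      * \code
      * Eigen::Vector4d coeffs(-6.0, 11.0, -6.0, 1.0);   // (x - 1)(x - 2)(x - 3)
      * Eigen::PolynomialSolver<double, 3> solver(coeffs);
      * std::vector<double> re;
      * solver.realRoots(re);         // re now holds 1, 2 and 3 (in some order)
      * \endcode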
* */ template inline void realRoots( Stl_back_insertion_sequence& bi_seq, const RealScalar& absImaginaryThreshold = NumTraits::dummy_precision() ) const { using std::abs; bi_seq.clear(); for(Index i=0; i inline const RootType& selectComplexRoot_withRespectToNorm( squaredNormBinaryPredicate& pred ) const { Index res=0; RealScalar norm2 = numext::abs2( m_roots[0] ); for( Index i=1; i greater; return selectComplexRoot_withRespectToNorm( greater ); } /** * \returns the complex root with smallest norm. */ inline const RootType& smallestRoot() const { std::less less; return selectComplexRoot_withRespectToNorm( less ); } protected: template inline const RealScalar& selectRealRoot_withRespectToAbsRealPart( squaredRealPartBinaryPredicate& pred, bool& hasArealRoot, const RealScalar& absImaginaryThreshold = NumTraits::dummy_precision() ) const { using std::abs; hasArealRoot = false; Index res=0; RealScalar abs2(0); for( Index i=0; i inline const RealScalar& selectRealRoot_withRespectToRealPart( RealPartBinaryPredicate& pred, bool& hasArealRoot, const RealScalar& absImaginaryThreshold = NumTraits::dummy_precision() ) const { using std::abs; hasArealRoot = false; Index res=0; RealScalar val(0); for( Index i=0; i::dummy_precision() ) const { std::greater greater; return selectRealRoot_withRespectToAbsRealPart( greater, hasArealRoot, absImaginaryThreshold ); } /** * \returns a real root with smallest absolute magnitude. * A real root is defined as the real part of a complex root with absolute imaginary * part smallest than absImaginaryThreshold. * absImaginaryThreshold takes the dummy_precision associated * with the _Scalar template parameter of the PolynomialSolver class as the default value. * If no real root is found the boolean hasArealRoot is set to false and the real part of * the root with smallest absolute imaginary part is returned instead. * * \param[out] hasArealRoot : boolean true if a real root is found according to the * absImaginaryThreshold criterion, false otherwise. * \param[in] absImaginaryThreshold : threshold on the absolute imaginary part to decide * whether or not a root is real. */ inline const RealScalar& absSmallestRealRoot( bool& hasArealRoot, const RealScalar& absImaginaryThreshold = NumTraits::dummy_precision() ) const { std::less less; return selectRealRoot_withRespectToAbsRealPart( less, hasArealRoot, absImaginaryThreshold ); } /** * \returns the real root with greatest value. * A real root is defined as the real part of a complex root with absolute imaginary * part smallest than absImaginaryThreshold. * absImaginaryThreshold takes the dummy_precision associated * with the _Scalar template parameter of the PolynomialSolver class as the default value. * If no real root is found the boolean hasArealRoot is set to false and the real part of * the root with smallest absolute imaginary part is returned instead. * * \param[out] hasArealRoot : boolean true if a real root is found according to the * absImaginaryThreshold criterion, false otherwise. * \param[in] absImaginaryThreshold : threshold on the absolute imaginary part to decide * whether or not a root is real. */ inline const RealScalar& greatestRealRoot( bool& hasArealRoot, const RealScalar& absImaginaryThreshold = NumTraits::dummy_precision() ) const { std::greater greater; return selectRealRoot_withRespectToRealPart( greater, hasArealRoot, absImaginaryThreshold ); } /** * \returns the real root with smallest value. 
* A real root is defined as the real part of a complex root with absolute imaginary * part smallest than absImaginaryThreshold. * absImaginaryThreshold takes the dummy_precision associated * with the _Scalar template parameter of the PolynomialSolver class as the default value. * If no real root is found the boolean hasArealRoot is set to false and the real part of * the root with smallest absolute imaginary part is returned instead. * * \param[out] hasArealRoot : boolean true if a real root is found according to the * absImaginaryThreshold criterion, false otherwise. * \param[in] absImaginaryThreshold : threshold on the absolute imaginary part to decide * whether or not a root is real. */ inline const RealScalar& smallestRealRoot( bool& hasArealRoot, const RealScalar& absImaginaryThreshold = NumTraits::dummy_precision() ) const { std::less less; return selectRealRoot_withRespectToRealPart( less, hasArealRoot, absImaginaryThreshold ); } protected: RootsType m_roots; }; #define EIGEN_POLYNOMIAL_SOLVER_BASE_INHERITED_TYPES( BASE ) \ typedef typename BASE::Scalar Scalar; \ typedef typename BASE::RealScalar RealScalar; \ typedef typename BASE::RootType RootType; \ typedef typename BASE::RootsType RootsType; /** \ingroup Polynomials_Module * * \class PolynomialSolver * * \brief A polynomial solver * * Computes the complex roots of a real polynomial. * * \param _Scalar the scalar type, i.e., the type of the polynomial coefficients * \param _Deg the degree of the polynomial, can be a compile time value or Dynamic. * Notice that the number of polynomial coefficients is _Deg+1. * * This class implements a polynomial solver and provides convenient methods such as * - real roots, * - greatest, smallest complex roots, * - real roots with greatest, smallest absolute real value. * - greatest, smallest real roots. * * WARNING: this polynomial solver is experimental, part of the unsuported Eigen modules. * * * Currently a QR algorithm is used to compute the eigenvalues of the companion matrix of * the polynomial to compute its roots. * This supposes that the complex moduli of the roots are all distinct: e.g. there should * be no multiple roots or conjugate roots for instance. * With 32bit (float) floating types this problem shows up frequently. * However, almost always, correct accuracy is reached even in these cases for 64bit * (double) floating types and small polynomial degree (<20). */ template< typename _Scalar, int _Deg > class PolynomialSolver : public PolynomialSolverBase<_Scalar,_Deg> { public: EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Deg==Dynamic ? Dynamic : _Deg) typedef PolynomialSolverBase<_Scalar,_Deg> PS_Base; EIGEN_POLYNOMIAL_SOLVER_BASE_INHERITED_TYPES( PS_Base ) typedef Matrix CompanionMatrixType; typedef EigenSolver EigenSolverType; public: /** Computes the complex roots of a new polynomial. 
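      *
      * Sketch of the intended call pattern (added for illustration; the coefficient
      * values are arbitrary examples):
      * \code
      * Eigen::Vector3d coeffs(-2.0, 1.0, 1.0);         // (x - 1)(x + 2)
      * Eigen::PolynomialSolver<double, 2> solver;
      * solver.compute(coeffs);
      * std::complex<double> r0 = solver.roots()[0];    // roots are 1 and -2, stored as complex values
      * \endcode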
*/ template< typename OtherPolynomial > void compute( const OtherPolynomial& poly ) { eigen_assert( Scalar(0) != poly[poly.size()-1] ); internal::companion companion( poly ); companion.balance(); m_eigenSolver.compute( companion.denseMatrix() ); m_roots = m_eigenSolver.eigenvalues(); } public: template< typename OtherPolynomial > inline PolynomialSolver( const OtherPolynomial& poly ){ compute( poly ); } inline PolynomialSolver(){} protected: using PS_Base::m_roots; EigenSolverType m_eigenSolver; }; template< typename _Scalar > class PolynomialSolver<_Scalar,1> : public PolynomialSolverBase<_Scalar,1> { public: typedef PolynomialSolverBase<_Scalar,1> PS_Base; EIGEN_POLYNOMIAL_SOLVER_BASE_INHERITED_TYPES( PS_Base ) public: /** Computes the complex roots of a new polynomial. */ template< typename OtherPolynomial > void compute( const OtherPolynomial& poly ) { eigen_assert( Scalar(0) != poly[poly.size()-1] ); m_roots[0] = -poly[0]/poly[poly.size()-1]; } protected: using PS_Base::m_roots; }; } // end namespace Eigen #endif // EIGEN_POLYNOMIAL_SOLVER_H RcppEigen/inst/include/unsupported/Eigen/src/FFT/0000755000176200001440000000000012253717461021363 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/src/FFT/ei_fftw_impl.h0000644000176200001440000002200612253717461024200 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Mark Borgerding mark a borgerding net // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. namespace Eigen { namespace internal { // FFTW uses non-const arguments // so we must use ugly const_cast calls for all the args it uses // // This should be safe as long as // 1. we use FFTW_ESTIMATE for all our planning // see the FFTW docs section 4.3.2 "Planner Flags" // 2. 
fftw_complex is compatible with std::complex // This assumes std::complex layout is array of size 2 with real,imag template inline T * fftw_cast(const T* p) { return const_cast( p); } inline fftw_complex * fftw_cast( const std::complex * p) { return const_cast( reinterpret_cast(p) ); } inline fftwf_complex * fftw_cast( const std::complex * p) { return const_cast( reinterpret_cast(p) ); } inline fftwl_complex * fftw_cast( const std::complex * p) { return const_cast( reinterpret_cast(p) ); } template struct fftw_plan {}; template <> struct fftw_plan { typedef float scalar_type; typedef fftwf_complex complex_type; fftwf_plan m_plan; fftw_plan() :m_plan(NULL) {} ~fftw_plan() {if (m_plan) fftwf_destroy_plan(m_plan);} inline void fwd(complex_type * dst,complex_type * src,int nfft) { if (m_plan==NULL) m_plan = fftwf_plan_dft_1d(nfft,src,dst, FFTW_FORWARD, FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); fftwf_execute_dft( m_plan, src,dst); } inline void inv(complex_type * dst,complex_type * src,int nfft) { if (m_plan==NULL) m_plan = fftwf_plan_dft_1d(nfft,src,dst, FFTW_BACKWARD , FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); fftwf_execute_dft( m_plan, src,dst); } inline void fwd(complex_type * dst,scalar_type * src,int nfft) { if (m_plan==NULL) m_plan = fftwf_plan_dft_r2c_1d(nfft,src,dst,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); fftwf_execute_dft_r2c( m_plan,src,dst); } inline void inv(scalar_type * dst,complex_type * src,int nfft) { if (m_plan==NULL) m_plan = fftwf_plan_dft_c2r_1d(nfft,src,dst,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); fftwf_execute_dft_c2r( m_plan, src,dst); } inline void fwd2( complex_type * dst,complex_type * src,int n0,int n1) { if (m_plan==NULL) m_plan = fftwf_plan_dft_2d(n0,n1,src,dst,FFTW_FORWARD,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); fftwf_execute_dft( m_plan, src,dst); } inline void inv2( complex_type * dst,complex_type * src,int n0,int n1) { if (m_plan==NULL) m_plan = fftwf_plan_dft_2d(n0,n1,src,dst,FFTW_BACKWARD,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); fftwf_execute_dft( m_plan, src,dst); } }; template <> struct fftw_plan { typedef double scalar_type; typedef fftw_complex complex_type; ::fftw_plan m_plan; fftw_plan() :m_plan(NULL) {} ~fftw_plan() {if (m_plan) fftw_destroy_plan(m_plan);} inline void fwd(complex_type * dst,complex_type * src,int nfft) { if (m_plan==NULL) m_plan = fftw_plan_dft_1d(nfft,src,dst, FFTW_FORWARD, FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); fftw_execute_dft( m_plan, src,dst); } inline void inv(complex_type * dst,complex_type * src,int nfft) { if (m_plan==NULL) m_plan = fftw_plan_dft_1d(nfft,src,dst, FFTW_BACKWARD , FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); fftw_execute_dft( m_plan, src,dst); } inline void fwd(complex_type * dst,scalar_type * src,int nfft) { if (m_plan==NULL) m_plan = fftw_plan_dft_r2c_1d(nfft,src,dst,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); fftw_execute_dft_r2c( m_plan,src,dst); } inline void inv(scalar_type * dst,complex_type * src,int nfft) { if (m_plan==NULL) m_plan = fftw_plan_dft_c2r_1d(nfft,src,dst,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); fftw_execute_dft_c2r( m_plan, src,dst); } inline void fwd2( complex_type * dst,complex_type * src,int n0,int n1) { if (m_plan==NULL) m_plan = fftw_plan_dft_2d(n0,n1,src,dst,FFTW_FORWARD,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); fftw_execute_dft( m_plan, src,dst); } inline void inv2( complex_type * dst,complex_type * src,int n0,int n1) { if (m_plan==NULL) m_plan = fftw_plan_dft_2d(n0,n1,src,dst,FFTW_BACKWARD,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); fftw_execute_dft( m_plan, src,dst); } }; template <> struct fftw_plan { typedef long double scalar_type; typedef 
fftwl_complex complex_type; fftwl_plan m_plan; fftw_plan() :m_plan(NULL) {} ~fftw_plan() {if (m_plan) fftwl_destroy_plan(m_plan);} inline void fwd(complex_type * dst,complex_type * src,int nfft) { if (m_plan==NULL) m_plan = fftwl_plan_dft_1d(nfft,src,dst, FFTW_FORWARD, FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); fftwl_execute_dft( m_plan, src,dst); } inline void inv(complex_type * dst,complex_type * src,int nfft) { if (m_plan==NULL) m_plan = fftwl_plan_dft_1d(nfft,src,dst, FFTW_BACKWARD , FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); fftwl_execute_dft( m_plan, src,dst); } inline void fwd(complex_type * dst,scalar_type * src,int nfft) { if (m_plan==NULL) m_plan = fftwl_plan_dft_r2c_1d(nfft,src,dst,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); fftwl_execute_dft_r2c( m_plan,src,dst); } inline void inv(scalar_type * dst,complex_type * src,int nfft) { if (m_plan==NULL) m_plan = fftwl_plan_dft_c2r_1d(nfft,src,dst,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); fftwl_execute_dft_c2r( m_plan, src,dst); } inline void fwd2( complex_type * dst,complex_type * src,int n0,int n1) { if (m_plan==NULL) m_plan = fftwl_plan_dft_2d(n0,n1,src,dst,FFTW_FORWARD,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); fftwl_execute_dft( m_plan, src,dst); } inline void inv2( complex_type * dst,complex_type * src,int n0,int n1) { if (m_plan==NULL) m_plan = fftwl_plan_dft_2d(n0,n1,src,dst,FFTW_BACKWARD,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); fftwl_execute_dft( m_plan, src,dst); } }; template struct fftw_impl { typedef _Scalar Scalar; typedef std::complex Complex; inline void clear() { m_plans.clear(); } // complex-to-complex forward FFT inline void fwd( Complex * dst,const Complex *src,int nfft) { get_plan(nfft,false,dst,src).fwd(fftw_cast(dst), fftw_cast(src),nfft ); } // real-to-complex forward FFT inline void fwd( Complex * dst,const Scalar * src,int nfft) { get_plan(nfft,false,dst,src).fwd(fftw_cast(dst), fftw_cast(src) ,nfft); } // 2-d complex-to-complex inline void fwd2(Complex * dst, const Complex * src, int n0,int n1) { get_plan(n0,n1,false,dst,src).fwd2(fftw_cast(dst), fftw_cast(src) ,n0,n1); } // inverse complex-to-complex inline void inv(Complex * dst,const Complex *src,int nfft) { get_plan(nfft,true,dst,src).inv(fftw_cast(dst), fftw_cast(src),nfft ); } // half-complex to scalar inline void inv( Scalar * dst,const Complex * src,int nfft) { get_plan(nfft,true,dst,src).inv(fftw_cast(dst), fftw_cast(src),nfft ); } // 2-d complex-to-complex inline void inv2(Complex * dst, const Complex * src, int n0,int n1) { get_plan(n0,n1,true,dst,src).inv2(fftw_cast(dst), fftw_cast(src) ,n0,n1); } protected: typedef fftw_plan PlanData; typedef std::map PlanMap; PlanMap m_plans; inline PlanData & get_plan(int nfft,bool inverse,void * dst,const void * src) { bool inplace = (dst==src); bool aligned = ( (reinterpret_cast(src)&15) | (reinterpret_cast(dst)&15) ) == 0; int64_t key = ( (nfft<<3 ) | (inverse<<2) | (inplace<<1) | aligned ) << 1; return m_plans[key]; } inline PlanData & get_plan(int n0,int n1,bool inverse,void * dst,const void * src) { bool inplace = (dst==src); bool aligned = ( (reinterpret_cast(src)&15) | (reinterpret_cast(dst)&15) ) == 0; int64_t key = ( ( (((int64_t)n0) << 30)|(n1<<3 ) | (inverse<<2) | (inplace<<1) | aligned ) << 1 ) + 1; return m_plans[key]; } }; } // end namespace internal } // end namespace Eigen /* vim: set filetype=cpp et sw=2 ts=2 ai: */ RcppEigen/inst/include/unsupported/Eigen/src/FFT/ei_kissfft_impl.h0000644000176200001440000002776312253717461024722 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear 
algebra. // // Copyright (C) 2009 Mark Borgerding mark a borgerding net // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. namespace Eigen { namespace internal { // This FFT implementation was derived from kissfft http:sourceforge.net/projects/kissfft // Copyright 2003-2009 Mark Borgerding template struct kiss_cpx_fft { typedef _Scalar Scalar; typedef std::complex Complex; std::vector m_twiddles; std::vector m_stageRadix; std::vector m_stageRemainder; std::vector m_scratchBuf; bool m_inverse; inline void make_twiddles(int nfft,bool inverse) { using std::acos; m_inverse = inverse; m_twiddles.resize(nfft); Scalar phinc = (inverse?2:-2)* acos( (Scalar) -1) / nfft; for (int i=0;in) p=n;// impossible to have a factor > sqrt(n) } n /= p; m_stageRadix.push_back(p); m_stageRemainder.push_back(n); if ( p > 5 ) m_scratchBuf.resize(p); // scratchbuf will be needed in bfly_generic }while(n>1); } template inline void work( int stage,Complex * xout, const _Src * xin, size_t fstride,size_t in_stride) { int p = m_stageRadix[stage]; int m = m_stageRemainder[stage]; Complex * Fout_beg = xout; Complex * Fout_end = xout + p*m; if (m>1) { do{ // recursive call: // DFT of size m*p performed by doing // p instances of smaller DFTs of size m, // each one takes a decimated version of the input work(stage+1, xout , xin, fstride*p,in_stride); xin += fstride*in_stride; }while( (xout += m) != Fout_end ); }else{ do{ *xout = *xin; xin += fstride*in_stride; }while(++xout != Fout_end ); } xout=Fout_beg; // recombine the p smaller DFTs switch (p) { case 2: bfly2(xout,fstride,m); break; case 3: bfly3(xout,fstride,m); break; case 4: bfly4(xout,fstride,m); break; case 5: bfly5(xout,fstride,m); break; default: bfly_generic(xout,fstride,m,p); break; } } inline void bfly2( Complex * Fout, const size_t fstride, int m) { for (int k=0;kreal() - Scalar(.5)*scratch[3].real() , Fout->imag() - Scalar(.5)*scratch[3].imag() ); scratch[0] *= epi3.imag(); *Fout += scratch[3]; Fout[m2] = Complex( Fout[m].real() + scratch[0].imag() , Fout[m].imag() - scratch[0].real() ); Fout[m] += Complex( -scratch[0].imag(),scratch[0].real() ); ++Fout; }while(--k); } inline void bfly5( Complex * Fout, const size_t fstride, const size_t m) { Complex *Fout0,*Fout1,*Fout2,*Fout3,*Fout4; size_t u; Complex scratch[13]; Complex * twiddles = &m_twiddles[0]; Complex *tw; Complex ya,yb; ya = twiddles[fstride*m]; yb = twiddles[fstride*2*m]; Fout0=Fout; Fout1=Fout0+m; Fout2=Fout0+2*m; Fout3=Fout0+3*m; Fout4=Fout0+4*m; tw=twiddles; for ( u=0; u(m_twiddles.size()); Complex * scratchbuf = &m_scratchBuf[0]; for ( u=0; u(fstride) * k; if (twidx>=Norig) twidx-=Norig; t=scratchbuf[q] * twiddles[twidx]; Fout[ k ] += t; } k += m; } } } }; template struct kissfft_impl { typedef _Scalar Scalar; typedef std::complex Complex; void clear() { m_plans.clear(); m_realTwiddles.clear(); } inline void fwd( Complex * dst,const Complex *src,int nfft) { get_plan(nfft,false).work(0, dst, src, 1,1); } inline void fwd2( Complex * dst,const Complex *src,int n0,int n1) { EIGEN_UNUSED_VARIABLE(dst); EIGEN_UNUSED_VARIABLE(src); EIGEN_UNUSED_VARIABLE(n0); EIGEN_UNUSED_VARIABLE(n1); } inline void inv2( Complex * dst,const Complex *src,int n0,int n1) { EIGEN_UNUSED_VARIABLE(dst); EIGEN_UNUSED_VARIABLE(src); EIGEN_UNUSED_VARIABLE(n0); EIGEN_UNUSED_VARIABLE(n1); } // real-to-complex forward FFT // perform two FFTs of src even and src odd // then 
twiddle to recombine them into the half-spectrum format // then fill in the conjugate symmetric half inline void fwd( Complex * dst,const Scalar * src,int nfft) { if ( nfft&3 ) { // use generic mode for odd m_tmpBuf1.resize(nfft); get_plan(nfft,false).work(0, &m_tmpBuf1[0], src, 1,1); std::copy(m_tmpBuf1.begin(),m_tmpBuf1.begin()+(nfft>>1)+1,dst ); }else{ int ncfft = nfft>>1; int ncfft2 = nfft>>2; Complex * rtw = real_twiddles(ncfft2); // use optimized mode for even real fwd( dst, reinterpret_cast (src), ncfft); Complex dc = dst[0].real() + dst[0].imag(); Complex nyquist = dst[0].real() - dst[0].imag(); int k; for ( k=1;k <= ncfft2 ; ++k ) { Complex fpk = dst[k]; Complex fpnk = conj(dst[ncfft-k]); Complex f1k = fpk + fpnk; Complex f2k = fpk - fpnk; Complex tw= f2k * rtw[k-1]; dst[k] = (f1k + tw) * Scalar(.5); dst[ncfft-k] = conj(f1k -tw)*Scalar(.5); } dst[0] = dc; dst[ncfft] = nyquist; } } // inverse complex-to-complex inline void inv(Complex * dst,const Complex *src,int nfft) { get_plan(nfft,true).work(0, dst, src, 1,1); } // half-complex to scalar inline void inv( Scalar * dst,const Complex * src,int nfft) { if (nfft&3) { m_tmpBuf1.resize(nfft); m_tmpBuf2.resize(nfft); std::copy(src,src+(nfft>>1)+1,m_tmpBuf1.begin() ); for (int k=1;k<(nfft>>1)+1;++k) m_tmpBuf1[nfft-k] = conj(m_tmpBuf1[k]); inv(&m_tmpBuf2[0],&m_tmpBuf1[0],nfft); for (int k=0;k>1; int ncfft2 = nfft>>2; Complex * rtw = real_twiddles(ncfft2); m_tmpBuf1.resize(ncfft); m_tmpBuf1[0] = Complex( src[0].real() + src[ncfft].real(), src[0].real() - src[ncfft].real() ); for (int k = 1; k <= ncfft / 2; ++k) { Complex fk = src[k]; Complex fnkc = conj(src[ncfft-k]); Complex fek = fk + fnkc; Complex tmp = fk - fnkc; Complex fok = tmp * conj(rtw[k-1]); m_tmpBuf1[k] = fek + fok; m_tmpBuf1[ncfft-k] = conj(fek - fok); } get_plan(ncfft,true).work(0, reinterpret_cast(dst), &m_tmpBuf1[0], 1,1); } } protected: typedef kiss_cpx_fft PlanData; typedef std::map PlanMap; PlanMap m_plans; std::map > m_realTwiddles; std::vector m_tmpBuf1; std::vector m_tmpBuf2; inline int PlanKey(int nfft, bool isinverse) const { return (nfft<<1) | int(isinverse); } inline PlanData & get_plan(int nfft, bool inverse) { // TODO look for PlanKey(nfft, ! inverse) and conjugate the twiddles PlanData & pd = m_plans[ PlanKey(nfft,inverse) ]; if ( pd.m_twiddles.size() == 0 ) { pd.make_twiddles(nfft,inverse); pd.factorize(nfft); } return pd; } inline Complex * real_twiddles(int ncfft2) { using std::acos; std::vector & twidref = m_realTwiddles[ncfft2];// creates new if not there if ( (int)twidref.size() != ncfft2 ) { twidref.resize(ncfft2); int ncfft= ncfft2<<1; Scalar pi = acos( Scalar(-1) ); for (int k=1;k<=ncfft2;++k) twidref[k-1] = exp( Complex(0,-pi * (Scalar(k) / ncfft + Scalar(.5)) ) ); } return &twidref[0]; } }; } // end namespace internal } // end namespace Eigen /* vim: set filetype=cpp et sw=2 ts=2 ai: */ RcppEigen/inst/include/unsupported/Eigen/src/SparseExtra/0000755000176200001440000000000012501315103023164 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/src/SparseExtra/RandomSetter.h0000644000176200001440000002674212253717461026000 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_RANDOMSETTER_H #define EIGEN_RANDOMSETTER_H namespace Eigen { /** Represents a std::map * * \see RandomSetter */ template struct StdMapTraits { typedef int KeyType; typedef std::map Type; enum { IsSorted = 1 }; static void setInvalidKey(Type&, const KeyType&) {} }; #ifdef EIGEN_UNORDERED_MAP_SUPPORT /** Represents a std::unordered_map * * To use it you need to both define EIGEN_UNORDERED_MAP_SUPPORT and include the unordered_map header file * yourself making sure that unordered_map is defined in the std namespace. * * For instance, with current version of gcc you can either enable C++0x standard (-std=c++0x) or do: * \code * #include * #define EIGEN_UNORDERED_MAP_SUPPORT * namespace std { * using std::tr1::unordered_map; * } * \endcode * * \see RandomSetter */ template struct StdUnorderedMapTraits { typedef int KeyType; typedef std::unordered_map Type; enum { IsSorted = 0 }; static void setInvalidKey(Type&, const KeyType&) {} }; #endif // EIGEN_UNORDERED_MAP_SUPPORT #ifdef _DENSE_HASH_MAP_H_ /** Represents a google::dense_hash_map * * \see RandomSetter */ template struct GoogleDenseHashMapTraits { typedef int KeyType; typedef google::dense_hash_map Type; enum { IsSorted = 0 }; static void setInvalidKey(Type& map, const KeyType& k) { map.set_empty_key(k); } }; #endif #ifdef _SPARSE_HASH_MAP_H_ /** Represents a google::sparse_hash_map * * \see RandomSetter */ template struct GoogleSparseHashMapTraits { typedef int KeyType; typedef google::sparse_hash_map Type; enum { IsSorted = 0 }; static void setInvalidKey(Type&, const KeyType&) {} }; #endif /** \class RandomSetter * * \brief The RandomSetter is a wrapper object allowing to set/update a sparse matrix with random access * * \param SparseMatrixType the type of the sparse matrix we are updating * \param MapTraits a traits class representing the map implementation used for the temporary sparse storage. * Its default value depends on the system. * \param OuterPacketBits defines the number of rows (or columns) manage by a single map object * as a power of two exponent. * * This class temporarily represents a sparse matrix object using a generic map implementation allowing for * efficient random access. The conversion from the compressed representation to a hash_map object is performed * in the RandomSetter constructor, while the sparse matrix is updated back at destruction time. This strategy * suggest the use of nested blocks as in this example: * * \code * SparseMatrix m(rows,cols); * { * RandomSetter > w(m); * // don't use m but w instead with read/write random access to the coefficients: * for(;;) * w(rand(),rand()) = rand; * } * // when w is deleted, the data are copied back to m * // and m is ready to use. * \endcode * * Since hash_map objects are not fully sorted, representing a full matrix as a single hash_map would * involve a big and costly sort to update the compressed matrix back. To overcome this issue, a RandomSetter * use multiple hash_map, each representing 2^OuterPacketBits columns or rows according to the storage order. * To reach optimal performance, this value should be adjusted according to the average number of nonzeros * per rows/columns. * * The possible values for the template parameter MapTraits are: * - \b StdMapTraits: corresponds to std::map. 
(does not perform very well) * - \b GnuHashMapTraits: corresponds to __gnu_cxx::hash_map (available only with GCC) * - \b GoogleDenseHashMapTraits: corresponds to google::dense_hash_map (best efficiency, reasonable memory consumption) * - \b GoogleSparseHashMapTraits: corresponds to google::sparse_hash_map (best memory consumption, relatively good performance) * * The default map implementation depends on the availability, and the preferred order is: * GoogleSparseHashMapTraits, GnuHashMapTraits, and finally StdMapTraits. * * For performance and memory consumption reasons it is highly recommended to use one of * the Google's hash_map implementation. To enable the support for them, you have two options: * - \#include yourself \b before Eigen/Sparse header * - define EIGEN_GOOGLEHASH_SUPPORT * In the later case the inclusion of is made for you. * * \see http://code.google.com/p/google-sparsehash/ */ template class MapTraits = #if defined _DENSE_HASH_MAP_H_ GoogleDenseHashMapTraits #elif defined _HASH_MAP GnuHashMapTraits #else StdMapTraits #endif ,int OuterPacketBits = 6> class RandomSetter { typedef typename SparseMatrixType::Scalar Scalar; typedef typename SparseMatrixType::Index Index; struct ScalarWrapper { ScalarWrapper() : value(0) {} Scalar value; }; typedef typename MapTraits::KeyType KeyType; typedef typename MapTraits::Type HashMapType; static const int OuterPacketMask = (1 << OuterPacketBits) - 1; enum { SwapStorage = 1 - MapTraits::IsSorted, TargetRowMajor = (SparseMatrixType::Flags & RowMajorBit) ? 1 : 0, SetterRowMajor = SwapStorage ? 1-TargetRowMajor : TargetRowMajor }; public: /** Constructs a random setter object from the sparse matrix \a target * * Note that the initial value of \a target are imported. If you want to re-set * a sparse matrix from scratch, then you must set it to zero first using the * setZero() function. */ inline RandomSetter(SparseMatrixType& target) : mp_target(&target) { const Index outerSize = SwapStorage ? target.innerSize() : target.outerSize(); const Index innerSize = SwapStorage ? 
target.outerSize() : target.innerSize(); m_outerPackets = outerSize >> OuterPacketBits; if (outerSize&OuterPacketMask) m_outerPackets += 1; m_hashmaps = new HashMapType[m_outerPackets]; // compute number of bits needed to store inner indices Index aux = innerSize - 1; m_keyBitsOffset = 0; while (aux) { ++m_keyBitsOffset; aux = aux >> 1; } KeyType ik = (1<<(OuterPacketBits+m_keyBitsOffset)); for (Index k=0; k::setInvalidKey(m_hashmaps[k],ik); // insert current coeffs for (Index j=0; jouterSize(); ++j) for (typename SparseMatrixType::InnerIterator it(*mp_target,j); it; ++it) (*this)(TargetRowMajor?j:it.index(), TargetRowMajor?it.index():j) = it.value(); } /** Destructor updating back the sparse matrix target */ ~RandomSetter() { KeyType keyBitsMask = (1<setZero(); mp_target->makeCompressed(); mp_target->reserve(nonZeros()); Index prevOuter = -1; for (Index k=0; kfirst >> m_keyBitsOffset) + outerOffset; const Index inner = it->first & keyBitsMask; if (prevOuter!=outer) { for (Index j=prevOuter+1;j<=outer;++j) mp_target->startVec(j); prevOuter = outer; } mp_target->insertBackByOuterInner(outer, inner) = it->second.value; } } mp_target->finalize(); } else { VectorXi positions(mp_target->outerSize()); positions.setZero(); // pass 1 for (Index k=0; kfirst & keyBitsMask; ++positions[outer]; } } // prefix sum Index count = 0; for (Index j=0; jouterSize(); ++j) { Index tmp = positions[j]; mp_target->outerIndexPtr()[j] = count; positions[j] = count; count += tmp; } mp_target->makeCompressed(); mp_target->outerIndexPtr()[mp_target->outerSize()] = count; mp_target->resizeNonZeros(count); // pass 2 for (Index k=0; kfirst >> m_keyBitsOffset) + outerOffset; const Index outer = it->first & keyBitsMask; // sorted insertion // Note that we have to deal with at most 2^OuterPacketBits unsorted coefficients, // moreover those 2^OuterPacketBits coeffs are likely to be sparse, an so only a // small fraction of them have to be sorted, whence the following simple procedure: Index posStart = mp_target->outerIndexPtr()[outer]; Index i = (positions[outer]++) - 1; while ( (i >= posStart) && (mp_target->innerIndexPtr()[i] > inner) ) { mp_target->valuePtr()[i+1] = mp_target->valuePtr()[i]; mp_target->innerIndexPtr()[i+1] = mp_target->innerIndexPtr()[i]; --i; } mp_target->innerIndexPtr()[i+1] = inner; mp_target->valuePtr()[i+1] = it->second.value; } } } delete[] m_hashmaps; } /** \returns a reference to the coefficient at given coordinates \a row, \a col */ Scalar& operator() (Index row, Index col) { const Index outer = SetterRowMajor ? row : col; const Index inner = SetterRowMajor ? col : row; const Index outerMajor = outer >> OuterPacketBits; // index of the packet/map const Index outerMinor = outer & OuterPacketMask; // index of the inner vector in the packet const KeyType key = (KeyType(outerMinor)<(m_hashmaps[k].size()); return nz; } protected: HashMapType* m_hashmaps; SparseMatrixType* mp_target; Index m_outerPackets; unsigned char m_keyBitsOffset; }; } // end namespace Eigen #endif // EIGEN_RANDOMSETTER_H RcppEigen/inst/include/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h0000644000176200001440000002731112501315103027270 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
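// A brief usage sketch for the deprecated DynamicSparseMatrix class defined below
// (added for illustration; the class documentation itself recommends a SparseMatrix
// in uncompressed mode instead):
//
//   Eigen::DynamicSparseMatrix<double> m(100, 100);
//   m.coeffRef(3, 7) += 1.0;              // random write, ~log(nnz per inner vector)
//   Eigen::SparseMatrix<double> s(m);     // copy to compressed storage once assembly is done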
#ifndef EIGEN_DYNAMIC_SPARSEMATRIX_H #define EIGEN_DYNAMIC_SPARSEMATRIX_H namespace Eigen { /** \deprecated use a SparseMatrix in an uncompressed mode * * \class DynamicSparseMatrix * * \brief A sparse matrix class designed for matrix assembly purpose * * \param _Scalar the scalar type, i.e. the type of the coefficients * * Unlike SparseMatrix, this class provides a much higher degree of flexibility. In particular, it allows * random read/write accesses in log(rho*outer_size) where \c rho is the probability that a coefficient is * nonzero and outer_size is the number of columns if the matrix is column-major and the number of rows * otherwise. * * Internally, the data are stored as a std::vector of compressed vector. The performances of random writes might * decrease as the number of nonzeros per inner-vector increase. In practice, we observed very good performance * till about 100 nonzeros/vector, and the performance remains relatively good till 500 nonzeros/vectors. * * \see SparseMatrix */ namespace internal { template struct traits > { typedef _Scalar Scalar; typedef _Index Index; typedef Sparse StorageKind; typedef MatrixXpr XprKind; enum { RowsAtCompileTime = Dynamic, ColsAtCompileTime = Dynamic, MaxRowsAtCompileTime = Dynamic, MaxColsAtCompileTime = Dynamic, Flags = _Options | NestByRefBit | LvalueBit, CoeffReadCost = NumTraits::ReadCost, SupportedAccessPatterns = OuterRandomAccessPattern }; }; } template class DynamicSparseMatrix : public SparseMatrixBase > { public: EIGEN_SPARSE_PUBLIC_INTERFACE(DynamicSparseMatrix) // FIXME: why are these operator already alvailable ??? // EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(DynamicSparseMatrix, +=) // EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(DynamicSparseMatrix, -=) typedef MappedSparseMatrix Map; using Base::IsRowMajor; using Base::operator=; enum { Options = _Options }; protected: typedef DynamicSparseMatrix TransposedSparseMatrix; Index m_innerSize; std::vector > m_data; public: inline Index rows() const { return IsRowMajor ? outerSize() : m_innerSize; } inline Index cols() const { return IsRowMajor ? m_innerSize : outerSize(); } inline Index innerSize() const { return m_innerSize; } inline Index outerSize() const { return static_cast(m_data.size()); } inline Index innerNonZeros(Index j) const { return m_data[j].size(); } std::vector >& _data() { return m_data; } const std::vector >& _data() const { return m_data; } /** \returns the coefficient value at given position \a row, \a col * This operation involes a log(rho*outer_size) binary search. */ inline Scalar coeff(Index row, Index col) const { const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? col : row; return m_data[outer].at(inner); } /** \returns a reference to the coefficient value at given position \a row, \a col * This operation involes a log(rho*outer_size) binary search. If the coefficient does not * exist yet, then a sorted insertion into a sequential buffer is performed. */ inline Scalar& coeffRef(Index row, Index col) { const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? 
col : row; return m_data[outer].atWithInsertion(inner); } class InnerIterator; class ReverseInnerIterator; void setZero() { for (Index j=0; j(m_data[j].size()); return res; } void reserve(Index reserveSize = 1000) { if (outerSize()>0) { Index reserveSizePerVector = (std::max)(reserveSize/outerSize(),Index(4)); for (Index j=0; j(m_data[outer].size()) - 1; m_data[outer].resize(id+2,1); while ( (id >= startId) && (m_data[outer].index(id) > inner) ) { m_data[outer].index(id+1) = m_data[outer].index(id); m_data[outer].value(id+1) = m_data[outer].value(id); --id; } m_data[outer].index(id+1) = inner; m_data[outer].value(id+1) = 0; return m_data[outer].value(id+1); } /** Does nothing: provided for compatibility with SparseMatrix */ inline void finalize() {} /** Suppress all nonzeros which are smaller than \a reference under the tolerence \a epsilon */ void prune(Scalar reference, RealScalar epsilon = NumTraits::dummy_precision()) { for (Index j=0; jinnerSize) { // remove all coefficients with innerCoord>=innerSize // TODO //std::cerr << "not implemented yet\n"; exit(2); } if (m_data.size() != outerSize) { m_data.resize(outerSize); } } /** The class DynamicSparseMatrix is deprectaed */ EIGEN_DEPRECATED inline DynamicSparseMatrix() : m_innerSize(0), m_data(0) { eigen_assert(innerSize()==0 && outerSize()==0); } /** The class DynamicSparseMatrix is deprectaed */ EIGEN_DEPRECATED inline DynamicSparseMatrix(Index rows, Index cols) : m_innerSize(0) { resize(rows, cols); } /** The class DynamicSparseMatrix is deprectaed */ template EIGEN_DEPRECATED explicit inline DynamicSparseMatrix(const SparseMatrixBase& other) : m_innerSize(0) { Base::operator=(other.derived()); } inline DynamicSparseMatrix(const DynamicSparseMatrix& other) : Base(), m_innerSize(0) { *this = other.derived(); } inline void swap(DynamicSparseMatrix& other) { //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n"); std::swap(m_innerSize, other.m_innerSize); //std::swap(m_outerSize, other.m_outerSize); m_data.swap(other.m_data); } inline DynamicSparseMatrix& operator=(const DynamicSparseMatrix& other) { if (other.isRValue()) { swap(other.const_cast_derived()); } else { resize(other.rows(), other.cols()); m_data = other.m_data; } return *this; } /** Destructor */ inline ~DynamicSparseMatrix() {} public: /** \deprecated * Set the matrix to zero and reserve the memory for \a reserveSize nonzero coefficients. */ EIGEN_DEPRECATED void startFill(Index reserveSize = 1000) { setZero(); reserve(reserveSize); } /** \deprecated use insert() * inserts a nonzero coefficient at given coordinates \a row, \a col and returns its reference assuming that: * 1 - the coefficient does not exist yet * 2 - this the coefficient with greater inner coordinate for the given outer coordinate. * In other words, assuming \c *this is column-major, then there must not exists any nonzero coefficient of coordinates * \c i \c x \a col such that \c i >= \a row. Otherwise the matrix is invalid. * * \see fillrand(), coeffRef() */ EIGEN_DEPRECATED Scalar& fill(Index row, Index col) { const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? col : row; return insertBack(outer,inner); } /** \deprecated use insert() * Like fill() but with random inner coordinates. * Compared to the generic coeffRef(), the unique limitation is that we assume * the coefficient does not exist yet. */ EIGEN_DEPRECATED Scalar& fillrand(Index row, Index col) { return insert(row,col); } /** \deprecated use finalize() * Does nothing. Provided for compatibility with SparseMatrix. 
*/ EIGEN_DEPRECATED void endFill() {} # ifdef EIGEN_DYNAMICSPARSEMATRIX_PLUGIN # include EIGEN_DYNAMICSPARSEMATRIX_PLUGIN # endif }; template class DynamicSparseMatrix::InnerIterator : public SparseVector::InnerIterator { typedef typename SparseVector::InnerIterator Base; public: InnerIterator(const DynamicSparseMatrix& mat, Index outer) : Base(mat.m_data[outer]), m_outer(outer) {} inline Index row() const { return IsRowMajor ? m_outer : Base::index(); } inline Index col() const { return IsRowMajor ? Base::index() : m_outer; } protected: const Index m_outer; }; template class DynamicSparseMatrix::ReverseInnerIterator : public SparseVector::ReverseInnerIterator { typedef typename SparseVector::ReverseInnerIterator Base; public: ReverseInnerIterator(const DynamicSparseMatrix& mat, Index outer) : Base(mat.m_data[outer]), m_outer(outer) {} inline Index row() const { return IsRowMajor ? m_outer : Base::index(); } inline Index col() const { return IsRowMajor ? Base::index() : m_outer; } protected: const Index m_outer; }; } // end namespace Eigen #endif // EIGEN_DYNAMIC_SPARSEMATRIX_H RcppEigen/inst/include/unsupported/Eigen/src/SparseExtra/MarketIO.h0000644000176200001440000001646012253717461025040 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2011 Gael Guennebaud // Copyright (C) 2012 Desire NUENTSA WAKAM // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSE_MARKET_IO_H #define EIGEN_SPARSE_MARKET_IO_H #include namespace Eigen { namespace internal { template inline bool GetMarketLine (std::stringstream& line, int& M, int& N, int& i, int& j, Scalar& value) { line >> i >> j >> value; i--; j--; if(i>=0 && j>=0 && i inline bool GetMarketLine (std::stringstream& line, int& M, int& N, int& i, int& j, std::complex& value) { Scalar valR, valI; line >> i >> j >> valR >> valI; i--; j--; if(i>=0 && j>=0 && i(valR, valI); return true; } else return false; } template inline void GetVectorElt (const std::string& line, RealScalar& val) { std::istringstream newline(line); newline >> val; } template inline void GetVectorElt (const std::string& line, std::complex& val) { RealScalar valR, valI; std::istringstream newline(line); newline >> valR >> valI; val = std::complex(valR, valI); } template inline void putMarketHeader(std::string& header,int sym) { header= "%%MatrixMarket matrix coordinate "; if(internal::is_same >::value || internal::is_same >::value) { header += " complex"; if(sym == Symmetric) header += " symmetric"; else if (sym == SelfAdjoint) header += " Hermitian"; else header += " general"; } else { header += " real"; if(sym == Symmetric) header += " symmetric"; else header += " general"; } } template inline void PutMatrixElt(Scalar value, int row, int col, std::ofstream& out) { out << row << " "<< col << " " << value << "\n"; } template inline void PutMatrixElt(std::complex value, int row, int col, std::ofstream& out) { out << row << " " << col << " " << value.real() << " " << value.imag() << "\n"; } template inline void putVectorElt(Scalar value, std::ofstream& out) { out << value << "\n"; } template inline void putVectorElt(std::complex value, std::ofstream& out) { out << value.real << " " << value.imag()<< "\n"; } } // end namepsace internal inline bool getMarketHeader(const std::string& filename, int& sym, bool& iscomplex, bool& isvector) { sym = 0; 
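  // The banner parsed below is the standard Matrix Market header line, e.g.
  //   %%MatrixMarket matrix coordinate real general
  // where the third token is "coordinate" (sparse) or "array" (dense), the fourth token is the
  // scalar kind (real, complex, integer, pattern) and the fifth token is the symmetry
  // (general, symmetric, Hermitian, skew-symmetric).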
isvector = false; std::ifstream in(filename.c_str(),std::ios::in); if(!in) return false; std::string line; // The matrix header is always the first line in the file std::getline(in, line); eigen_assert(in.good()); std::stringstream fmtline(line); std::string substr[5]; fmtline>> substr[0] >> substr[1] >> substr[2] >> substr[3] >> substr[4]; if(substr[2].compare("array") == 0) isvector = true; if(substr[3].compare("complex") == 0) iscomplex = true; if(substr[4].compare("symmetric") == 0) sym = Symmetric; else if (substr[4].compare("Hermitian") == 0) sym = SelfAdjoint; return true; } template bool loadMarket(SparseMatrixType& mat, const std::string& filename) { typedef typename SparseMatrixType::Scalar Scalar; std::ifstream input(filename.c_str(),std::ios::in); if(!input) return false; const int maxBuffersize = 2048; char buffer[maxBuffersize]; bool readsizes = false; typedef Triplet T; std::vector elements; int M(-1), N(-1), NNZ(-1); int count = 0; while(input.getline(buffer, maxBuffersize)) { // skip comments //NOTE An appropriate test should be done on the header to get the symmetry if(buffer[0]=='%') continue; std::stringstream line(buffer); if(!readsizes) { line >> M >> N >> NNZ; if(M > 0 && N > 0 && NNZ > 0) { readsizes = true; std::cout << "sizes: " << M << "," << N << "," << NNZ << "\n"; mat.resize(M,N); mat.reserve(NNZ); } } else { int i(-1), j(-1); Scalar value; if( internal::GetMarketLine(line, M, N, i, j, value) ) { ++ count; elements.push_back(T(i,j,value)); } else std::cerr << "Invalid read: " << i << "," << j << "\n"; } } mat.setFromTriplets(elements.begin(), elements.end()); if(count!=NNZ) std::cerr << count << "!=" << NNZ << "\n"; input.close(); return true; } template bool loadMarketVector(VectorType& vec, const std::string& filename) { typedef typename VectorType::Scalar Scalar; std::ifstream in(filename.c_str(), std::ios::in); if(!in) return false; std::string line; int n(0), col(0); do { // Skip comments std::getline(in, line); eigen_assert(in.good()); } while (line[0] == '%'); std::istringstream newline(line); newline >> n >> col; eigen_assert(n>0 && col>0); vec.resize(n); int i = 0; Scalar value; while ( std::getline(in, line) && (i < n) ){ internal::GetVectorElt(line, value); vec(i++) = value; } in.close(); if (i!=n){ std::cerr<< "Unable to read all elements from file " << filename << "\n"; return false; } return true; } template bool saveMarket(const SparseMatrixType& mat, const std::string& filename, int sym = 0) { typedef typename SparseMatrixType::Scalar Scalar; std::ofstream out(filename.c_str(),std::ios::out); if(!out) return false; out.flags(std::ios_base::scientific); out.precision(64); std::string header; internal::putMarketHeader(header, sym); out << header << std::endl; out << mat.rows() << " " << mat.cols() << " " << mat.nonZeros() << "\n"; int count = 0; for(int j=0; j bool saveMarketVector (const VectorType& vec, const std::string& filename) { typedef typename VectorType::Scalar Scalar; std::ofstream out(filename.c_str(),std::ios::out); if(!out) return false; out.flags(std::ios_base::scientific); out.precision(64); if(internal::is_same >::value || internal::is_same >::value) out << "%%MatrixMarket matrix array complex general\n"; else out << "%%MatrixMarket matrix array real general\n"; out << vec.size() << " "<< 1 << "\n"; for (int i=0; i < vec.size(); i++){ internal::putVectorElt(vec(i), out); } out.close(); return true; } } // end namespace Eigen #endif // EIGEN_SPARSE_MARKET_IO_H 
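//
// A small usage sketch for the Matrix Market routines above (illustration only; the file names
// "A.mtx" and "b.mtx" are placeholders):
//
//   Eigen::SparseMatrix<double> A;
//   Eigen::VectorXd b;
//   if (Eigen::loadMarket(A, "A.mtx") && Eigen::loadMarketVector(b, "b.mtx")) {
//     Eigen::saveMarket(A, "A_copy.mtx");         // write the matrix back out
//     Eigen::saveMarketVector(b, "b_copy.mtx");   // write the right-hand side back out
//   }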
RcppEigen/inst/include/unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h0000644000176200001440000001557212305644741027510 0ustar liggesusers // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Desire NUENTSA WAKAM // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_BROWSE_MATRICES_H #define EIGEN_BROWSE_MATRICES_H namespace Eigen { enum { SPD = 0x100, NonSymmetric = 0x0 }; /** * @brief Iterator to browse matrices from a specified folder * * This is used to load all the matrices from a folder. * The matrices should be in Matrix Market format * It is assumed that the matrices are named as matname.mtx * and matname_SPD.mtx if the matrix is Symmetric and positive definite (or Hermitian) * The right hand side vectors are loaded as well, if they exist. * They should be named as matname_b.mtx. * Note that the right hand side for a SPD matrix is named as matname_SPD_b.mtx * * Sometimes a reference solution is available. In this case, it should be named as matname_x.mtx * * Sample code * \code * * \endcode * * \tparam Scalar The scalar type */ template class MatrixMarketIterator { public: typedef Matrix VectorType; typedef SparseMatrix MatrixType; public: MatrixMarketIterator(const std::string folder):m_sym(0),m_isvalid(false),m_matIsLoaded(false),m_hasRhs(false),m_hasrefX(false),m_folder(folder) { m_folder_id = opendir(folder.c_str()); if (!m_folder_id){ m_isvalid = false; std::cerr << "The provided Matrix folder could not be opened \n\n"; abort(); } Getnextvalidmatrix(); } ~MatrixMarketIterator() { if (m_folder_id) closedir(m_folder_id); } inline MatrixMarketIterator& operator++() { m_matIsLoaded = false; m_hasrefX = false; m_hasRhs = false; Getnextvalidmatrix(); return *this; } inline operator bool() const { return m_isvalid;} /** Return the sparse matrix corresponding to the current file */ inline MatrixType& matrix() { // Read the matrix if (m_matIsLoaded) return m_mat; std::string matrix_file = m_folder + "/" + m_matname + ".mtx"; if ( !loadMarket(m_mat, matrix_file)) { m_matIsLoaded = false; return m_mat; } m_matIsLoaded = true; if (m_sym != NonSymmetric) { // Store the upper part of the matrix. It is needed by the solvers dealing with nonsymmetric matrices ?? MatrixType B; B = m_mat; m_mat = B.template selfadjointView(); } return m_mat; } /** Return the right hand side corresponding to the current matrix. * If the rhs file is not provided, a random rhs is generated */ inline VectorType& rhs() { // Get the right hand side if (m_hasRhs) return m_rhs; std::string rhs_file; rhs_file = m_folder + "/" + m_matname + "_b.mtx"; // The pattern is matname_b.mtx m_hasRhs = Fileexists(rhs_file); if (m_hasRhs) { m_rhs.resize(m_mat.cols()); m_hasRhs = loadMarketVector(m_rhs, rhs_file); } if (!m_hasRhs) { // Generate a random right hand side if (!m_matIsLoaded) this->matrix(); m_refX.resize(m_mat.cols()); m_refX.setRandom(); m_rhs = m_mat * m_refX; m_hasrefX = true; m_hasRhs = true; } return m_rhs; } /** Return a reference solution * If it is not provided and if the right hand side is not available * then refX is randomly generated such that A*refX = b * where A and b are the matrix and the rhs. 
* Note that when a rhs is provided, refX is not available */ inline VectorType& refX() { // Check if a reference solution is provided if (m_hasrefX) return m_refX; std::string lhs_file; lhs_file = m_folder + "/" + m_matname + "_x.mtx"; m_hasrefX = Fileexists(lhs_file); if (m_hasrefX) { m_refX.resize(m_mat.cols()); m_hasrefX = loadMarketVector(m_refX, lhs_file); } return m_refX; } inline std::string& matname() { return m_matname; } inline int sym() { return m_sym; } inline bool hasRhs() {return m_hasRhs; } inline bool hasrefX() {return m_hasrefX; } protected: inline bool Fileexists(std::string file) { std::ifstream file_id(file.c_str()); if (!file_id.good() ) { return false; } else { file_id.close(); return true; } } void Getnextvalidmatrix( ) { m_isvalid = false; // Here, we return with the next valid matrix in the folder while ( (m_curs_id = readdir(m_folder_id)) != NULL) { m_isvalid = false; std::string curfile; curfile = m_folder + "/" + m_curs_id->d_name; // Discard if it is a folder #if !(defined(__sun) || defined(_AIX) || defined(__hpux) || defined(__sgi)) if (m_curs_id->d_type == DT_DIR) continue; //FIXME This may not be available on non BSD systems #endif // struct stat st_buf; // stat (curfile.c_str(), &st_buf); // if (S_ISDIR(st_buf.st_mode)) continue; // Determine from the header if it is a matrix or a right hand side bool isvector,iscomplex=false; if(!getMarketHeader(curfile,m_sym,iscomplex,isvector)) continue; if(isvector) continue; if (!iscomplex) { if(internal::is_same >::value || internal::is_same >::value) continue; } if (iscomplex) { if(internal::is_same::value || internal::is_same::value) continue; } // Get the matrix name std::string filename = m_curs_id->d_name; m_matname = filename.substr(0, filename.length()-4); // Find if the matrix is SPD size_t found = m_matname.find("SPD"); if( (found!=std::string::npos) && (m_sym != NonSymmetric) ) m_sym = SPD; m_isvalid = true; break; } } int m_sym; // Symmetry of the matrix MatrixType m_mat; // Current matrix VectorType m_rhs; // Current vector VectorType m_refX; // The reference solution, if exists std::string m_matname; // Matrix Name bool m_isvalid; bool m_matIsLoaded; // Determine if the matrix has already been loaded from the file bool m_hasRhs; // The right hand side exists bool m_hasrefX; // A reference solution is provided std::string m_folder; DIR * m_folder_id; struct dirent *m_curs_id; }; } // end namespace Eigen #endif RcppEigen/inst/include/unsupported/Eigen/src/SparseExtra/BlockOfDynamicSparseMatrix.h0000644000176200001440000001024412253717461030546 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSE_BLOCKFORDYNAMICMATRIX_H #define EIGEN_SPARSE_BLOCKFORDYNAMICMATRIX_H namespace Eigen { #if 0 // NOTE Have to be reimplemented as a specialization of BlockImpl< DynamicSparseMatrix<_Scalar, _Options, _Index>, ... 
> // See SparseBlock.h for an example /*************************************************************************** * specialisation for DynamicSparseMatrix ***************************************************************************/ template class SparseInnerVectorSet, Size> : public SparseMatrixBase, Size> > { typedef DynamicSparseMatrix<_Scalar, _Options, _Index> MatrixType; public: enum { IsRowMajor = internal::traits::IsRowMajor }; EIGEN_SPARSE_PUBLIC_INTERFACE(SparseInnerVectorSet) class InnerIterator: public MatrixType::InnerIterator { public: inline InnerIterator(const SparseInnerVectorSet& xpr, Index outer) : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) {} inline Index row() const { return IsRowMajor ? m_outer : this->index(); } inline Index col() const { return IsRowMajor ? this->index() : m_outer; } protected: Index m_outer; }; inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize) : m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize) { eigen_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) ); } inline SparseInnerVectorSet(const MatrixType& matrix, Index outer) : m_matrix(matrix), m_outerStart(outer), m_outerSize(Size) { eigen_assert(Size!=Dynamic); eigen_assert( (outer>=0) && (outer inline SparseInnerVectorSet& operator=(const SparseMatrixBase& other) { if (IsRowMajor != ((OtherDerived::Flags&RowMajorBit)==RowMajorBit)) { // need to transpose => perform a block evaluation followed by a big swap DynamicSparseMatrix aux(other); *this = aux.markAsRValue(); } else { // evaluate/copy vector per vector for (Index j=0; j aux(other.innerVector(j)); m_matrix.const_cast_derived()._data()[m_outerStart+j].swap(aux._data()); } } return *this; } inline SparseInnerVectorSet& operator=(const SparseInnerVectorSet& other) { return operator=(other); } Index nonZeros() const { Index count = 0; for (Index j=0; j0); return m_matrix.data()[m_outerStart].vale(m_matrix.data()[m_outerStart].size()-1); } // template // inline SparseInnerVectorSet& operator=(const SparseMatrixBase& other) // { // return *this; // } EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } protected: const typename MatrixType::Nested m_matrix; Index m_outerStart; const internal::variable_if_dynamic m_outerSize; }; #endif } // end namespace Eigen #endif // EIGEN_SPARSE_BLOCKFORDYNAMICMATRIX_H RcppEigen/inst/include/unsupported/Eigen/src/Skyline/0000755000176200001440000000000012253717461022362 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/src/Skyline/SkylineProduct.h0000644000176200001440000002513212253717461025515 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Guillaume Saupin // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_SKYLINEPRODUCT_H #define EIGEN_SKYLINEPRODUCT_H namespace Eigen { template struct SkylineProductReturnType { typedef const typename internal::nested::type LhsNested; typedef const typename internal::nested::type RhsNested; typedef SkylineProduct Type; }; template struct internal::traits > { // clean the nested types: typedef typename internal::remove_all::type _LhsNested; typedef typename internal::remove_all::type _RhsNested; typedef typename _LhsNested::Scalar Scalar; enum { LhsCoeffReadCost = _LhsNested::CoeffReadCost, RhsCoeffReadCost = _RhsNested::CoeffReadCost, LhsFlags = _LhsNested::Flags, RhsFlags = _RhsNested::Flags, RowsAtCompileTime = _LhsNested::RowsAtCompileTime, ColsAtCompileTime = _RhsNested::ColsAtCompileTime, InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(_LhsNested::ColsAtCompileTime, _RhsNested::RowsAtCompileTime), MaxRowsAtCompileTime = _LhsNested::MaxRowsAtCompileTime, MaxColsAtCompileTime = _RhsNested::MaxColsAtCompileTime, EvalToRowMajor = (RhsFlags & LhsFlags & RowMajorBit), ResultIsSkyline = ProductMode == SkylineTimeSkylineProduct, RemovedBits = ~((EvalToRowMajor ? 0 : RowMajorBit) | (ResultIsSkyline ? 0 : SkylineBit)), Flags = (int(LhsFlags | RhsFlags) & HereditaryBits & RemovedBits) | EvalBeforeAssigningBit | EvalBeforeNestingBit, CoeffReadCost = Dynamic }; typedef typename internal::conditional >, MatrixBase > >::type Base; }; namespace internal { template class SkylineProduct : no_assignment_operator, public traits >::Base { public: EIGEN_GENERIC_PUBLIC_INTERFACE(SkylineProduct) private: typedef typename traits::_LhsNested _LhsNested; typedef typename traits::_RhsNested _RhsNested; public: template EIGEN_STRONG_INLINE SkylineProduct(const Lhs& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs) { eigen_assert(lhs.cols() == rhs.rows()); enum { ProductIsValid = _LhsNested::ColsAtCompileTime == Dynamic || _RhsNested::RowsAtCompileTime == Dynamic || int(_LhsNested::ColsAtCompileTime) == int(_RhsNested::RowsAtCompileTime), AreVectors = _LhsNested::IsVectorAtCompileTime && _RhsNested::IsVectorAtCompileTime, SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(_LhsNested, _RhsNested) }; // note to the lost user: // * for a dot product use: v1.dot(v2) // * for a coeff-wise product use: v1.cwise()*v2 EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes), INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS) EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors), INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION) EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT) } EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); } EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); } EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; } EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; } protected: LhsNested m_lhs; RhsNested m_rhs; }; // dense = skyline * dense // Note that here we force no inlining and separate the setZero() because GCC messes up otherwise template EIGEN_DONT_INLINE void skyline_row_major_time_dense_product(const Lhs& lhs, const Rhs& rhs, Dest& dst) { typedef typename remove_all::type _Lhs; typedef typename remove_all::type _Rhs; typedef typename traits::Scalar Scalar; enum { LhsIsRowMajor = (_Lhs::Flags & RowMajorBit) == RowMajorBit, LhsIsSelfAdjoint = (_Lhs::Flags & SelfAdjointBit) == SelfAdjointBit, ProcessFirstHalf = LhsIsSelfAdjoint && (((_Lhs::Flags & (UpperTriangularBit | 
LowerTriangularBit)) == 0) || ((_Lhs::Flags & UpperTriangularBit) && !LhsIsRowMajor) || ((_Lhs::Flags & LowerTriangularBit) && LhsIsRowMajor)), ProcessSecondHalf = LhsIsSelfAdjoint && (!ProcessFirstHalf) }; //Use matrix diagonal part <- Improvement : use inner iterator on dense matrix. for (Index col = 0; col < rhs.cols(); col++) { for (Index row = 0; row < lhs.rows(); row++) { dst(row, col) = lhs.coeffDiag(row) * rhs(row, col); } } //Use matrix lower triangular part for (Index row = 0; row < lhs.rows(); row++) { typename _Lhs::InnerLowerIterator lIt(lhs, row); const Index stop = lIt.col() + lIt.size(); for (Index col = 0; col < rhs.cols(); col++) { Index k = lIt.col(); Scalar tmp = 0; while (k < stop) { tmp += lIt.value() * rhs(k++, col); ++lIt; } dst(row, col) += tmp; lIt += -lIt.size(); } } //Use matrix upper triangular part for (Index lhscol = 0; lhscol < lhs.cols(); lhscol++) { typename _Lhs::InnerUpperIterator uIt(lhs, lhscol); const Index stop = uIt.size() + uIt.row(); for (Index rhscol = 0; rhscol < rhs.cols(); rhscol++) { const Scalar rhsCoeff = rhs.coeff(lhscol, rhscol); Index k = uIt.row(); while (k < stop) { dst(k++, rhscol) += uIt.value() * rhsCoeff; ++uIt; } uIt += -uIt.size(); } } } template EIGEN_DONT_INLINE void skyline_col_major_time_dense_product(const Lhs& lhs, const Rhs& rhs, Dest& dst) { typedef typename remove_all::type _Lhs; typedef typename remove_all::type _Rhs; typedef typename traits::Scalar Scalar; enum { LhsIsRowMajor = (_Lhs::Flags & RowMajorBit) == RowMajorBit, LhsIsSelfAdjoint = (_Lhs::Flags & SelfAdjointBit) == SelfAdjointBit, ProcessFirstHalf = LhsIsSelfAdjoint && (((_Lhs::Flags & (UpperTriangularBit | LowerTriangularBit)) == 0) || ((_Lhs::Flags & UpperTriangularBit) && !LhsIsRowMajor) || ((_Lhs::Flags & LowerTriangularBit) && LhsIsRowMajor)), ProcessSecondHalf = LhsIsSelfAdjoint && (!ProcessFirstHalf) }; //Use matrix diagonal part <- Improvement : use inner iterator on dense matrix. 
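    // As in the row-major kernel above, the result is accumulated in three passes; here:
    // the diagonal coefficients first, then the upper-triangular entries (iterated per row
    // of the lhs), and finally the lower-triangular entries (iterated per column of the lhs).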
for (Index col = 0; col < rhs.cols(); col++) { for (Index row = 0; row < lhs.rows(); row++) { dst(row, col) = lhs.coeffDiag(row) * rhs(row, col); } } //Use matrix upper triangular part for (Index row = 0; row < lhs.rows(); row++) { typename _Lhs::InnerUpperIterator uIt(lhs, row); const Index stop = uIt.col() + uIt.size(); for (Index col = 0; col < rhs.cols(); col++) { Index k = uIt.col(); Scalar tmp = 0; while (k < stop) { tmp += uIt.value() * rhs(k++, col); ++uIt; } dst(row, col) += tmp; uIt += -uIt.size(); } } //Use matrix lower triangular part for (Index lhscol = 0; lhscol < lhs.cols(); lhscol++) { typename _Lhs::InnerLowerIterator lIt(lhs, lhscol); const Index stop = lIt.size() + lIt.row(); for (Index rhscol = 0; rhscol < rhs.cols(); rhscol++) { const Scalar rhsCoeff = rhs.coeff(lhscol, rhscol); Index k = lIt.row(); while (k < stop) { dst(k++, rhscol) += lIt.value() * rhsCoeff; ++lIt; } lIt += -lIt.size(); } } } template::Flags&RowMajorBit> struct skyline_product_selector; template struct skyline_product_selector { typedef typename traits::type>::Scalar Scalar; static void run(const Lhs& lhs, const Rhs& rhs, ResultType & res) { skyline_row_major_time_dense_product (lhs, rhs, res); } }; template struct skyline_product_selector { typedef typename traits::type>::Scalar Scalar; static void run(const Lhs& lhs, const Rhs& rhs, ResultType & res) { skyline_col_major_time_dense_product (lhs, rhs, res); } }; } // end namespace internal // template // template // Derived & MatrixBase::lazyAssign(const SkylineProduct& product) { // typedef typename internal::remove_all::type _Lhs; // internal::skyline_product_selector::type, // typename internal::remove_all::type, // Derived>::run(product.lhs(), product.rhs(), derived()); // // return derived(); // } // skyline * dense template template EIGEN_STRONG_INLINE const typename SkylineProductReturnType::Type SkylineMatrixBase::operator*(const MatrixBase &other) const { return typename SkylineProductReturnType::Type(derived(), other.derived()); } } // end namespace Eigen #endif // EIGEN_SKYLINEPRODUCT_H RcppEigen/inst/include/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h0000644000176200001440000002613612253717461025716 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Guillaume Saupin // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SKYLINEINPLACELU_H #define EIGEN_SKYLINEINPLACELU_H namespace Eigen { /** \ingroup Skyline_Module * * \class SkylineInplaceLU * * \brief Inplace LU decomposition of a skyline matrix and associated features * * \param MatrixType the type of the matrix of which we are computing the LU factorization * */ template class SkylineInplaceLU { protected: typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Index Index; typedef typename NumTraits::Real RealScalar; public: /** Creates a LU object and compute the respective factorization of \a matrix using * flags \a flags. */ SkylineInplaceLU(MatrixType& matrix, int flags = 0) : /*m_matrix(matrix.rows(), matrix.cols()),*/ m_flags(flags), m_status(0), m_lu(matrix) { m_precision = RealScalar(0.1) * Eigen::dummy_precision (); m_lu.IsRowMajor ? computeRowMajor() : compute(); } /** Sets the relative threshold value used to prune zero coefficients during the decomposition. 
* * Setting a value greater than zero speeds up computation, and yields to an imcomplete * factorization with fewer non zero coefficients. Such approximate factors are especially * useful to initialize an iterative solver. * * Note that the exact meaning of this parameter might depends on the actual * backend. Moreover, not all backends support this feature. * * \sa precision() */ void setPrecision(RealScalar v) { m_precision = v; } /** \returns the current precision. * * \sa setPrecision() */ RealScalar precision() const { return m_precision; } /** Sets the flags. Possible values are: * - CompleteFactorization * - IncompleteFactorization * - MemoryEfficient * - one of the ordering methods * - etc... * * \sa flags() */ void setFlags(int f) { m_flags = f; } /** \returns the current flags */ int flags() const { return m_flags; } void setOrderingMethod(int m) { m_flags = m; } int orderingMethod() const { return m_flags; } /** Computes/re-computes the LU factorization */ void compute(); void computeRowMajor(); /** \returns the lower triangular matrix L */ //inline const MatrixType& matrixL() const { return m_matrixL; } /** \returns the upper triangular matrix U */ //inline const MatrixType& matrixU() const { return m_matrixU; } template bool solve(const MatrixBase &b, MatrixBase* x, const int transposed = 0) const; /** \returns true if the factorization succeeded */ inline bool succeeded(void) const { return m_succeeded; } protected: RealScalar m_precision; int m_flags; mutable int m_status; bool m_succeeded; MatrixType& m_lu; }; /** Computes / recomputes the in place LU decomposition of the SkylineInplaceLU. * using the default algorithm. */ template //template void SkylineInplaceLU::compute() { const size_t rows = m_lu.rows(); const size_t cols = m_lu.cols(); eigen_assert(rows == cols && "We do not (yet) support rectangular LU."); eigen_assert(!m_lu.IsRowMajor && "LU decomposition does not work with rowMajor Storage"); for (Index row = 0; row < rows; row++) { const double pivot = m_lu.coeffDiag(row); //Lower matrix Columns update const Index& col = row; for (typename MatrixType::InnerLowerIterator lIt(m_lu, col); lIt; ++lIt) { lIt.valueRef() /= pivot; } //Upper matrix update -> contiguous memory access typename MatrixType::InnerLowerIterator lIt(m_lu, col); for (Index rrow = row + 1; rrow < m_lu.rows(); rrow++) { typename MatrixType::InnerUpperIterator uItPivot(m_lu, row); typename MatrixType::InnerUpperIterator uIt(m_lu, rrow); const double coef = lIt.value(); uItPivot += (rrow - row - 1); //update upper part -> contiguous memory access for (++uItPivot; uIt && uItPivot;) { uIt.valueRef() -= uItPivot.value() * coef; ++uIt; ++uItPivot; } ++lIt; } //Upper matrix update -> non contiguous memory access typename MatrixType::InnerLowerIterator lIt3(m_lu, col); for (Index rrow = row + 1; rrow < m_lu.rows(); rrow++) { typename MatrixType::InnerUpperIterator uItPivot(m_lu, row); const double coef = lIt3.value(); //update lower part -> non contiguous memory access for (Index i = 0; i < rrow - row - 1; i++) { m_lu.coeffRefLower(rrow, row + i + 1) -= uItPivot.value() * coef; ++uItPivot; } ++lIt3; } //update diag -> contiguous typename MatrixType::InnerLowerIterator lIt2(m_lu, col); for (Index rrow = row + 1; rrow < m_lu.rows(); rrow++) { typename MatrixType::InnerUpperIterator uItPivot(m_lu, row); typename MatrixType::InnerUpperIterator uIt(m_lu, rrow); const double coef = lIt2.value(); uItPivot += (rrow - row - 1); m_lu.coeffRefDiag(rrow) -= uItPivot.value() * coef; ++lIt2; } } } template void 
SkylineInplaceLU::computeRowMajor() { const size_t rows = m_lu.rows(); const size_t cols = m_lu.cols(); eigen_assert(rows == cols && "We do not (yet) support rectangular LU."); eigen_assert(m_lu.IsRowMajor && "You're trying to apply rowMajor decomposition on a ColMajor matrix !"); for (Index row = 0; row < rows; row++) { typename MatrixType::InnerLowerIterator llIt(m_lu, row); for (Index col = llIt.col(); col < row; col++) { if (m_lu.coeffExistLower(row, col)) { const double diag = m_lu.coeffDiag(col); typename MatrixType::InnerLowerIterator lIt(m_lu, row); typename MatrixType::InnerUpperIterator uIt(m_lu, col); const Index offset = lIt.col() - uIt.row(); Index stop = offset > 0 ? col - lIt.col() : col - uIt.row(); //#define VECTORIZE #ifdef VECTORIZE Map rowVal(lIt.valuePtr() + (offset > 0 ? 0 : -offset), stop); Map colVal(uIt.valuePtr() + (offset > 0 ? offset : 0), stop); Scalar newCoeff = m_lu.coeffLower(row, col) - rowVal.dot(colVal); #else if (offset > 0) //Skip zero value of lIt uIt += offset; else //Skip zero values of uIt lIt += -offset; Scalar newCoeff = m_lu.coeffLower(row, col); for (Index k = 0; k < stop; ++k) { const Scalar tmp = newCoeff; newCoeff = tmp - lIt.value() * uIt.value(); ++lIt; ++uIt; } #endif m_lu.coeffRefLower(row, col) = newCoeff / diag; } } //Upper matrix update const Index col = row; typename MatrixType::InnerUpperIterator uuIt(m_lu, col); for (Index rrow = uuIt.row(); rrow < col; rrow++) { typename MatrixType::InnerLowerIterator lIt(m_lu, rrow); typename MatrixType::InnerUpperIterator uIt(m_lu, col); const Index offset = lIt.col() - uIt.row(); Index stop = offset > 0 ? rrow - lIt.col() : rrow - uIt.row(); #ifdef VECTORIZE Map rowVal(lIt.valuePtr() + (offset > 0 ? 0 : -offset), stop); Map colVal(uIt.valuePtr() + (offset > 0 ? offset : 0), stop); Scalar newCoeff = m_lu.coeffUpper(rrow, col) - rowVal.dot(colVal); #else if (offset > 0) //Skip zero value of lIt uIt += offset; else //Skip zero values of uIt lIt += -offset; Scalar newCoeff = m_lu.coeffUpper(rrow, col); for (Index k = 0; k < stop; ++k) { const Scalar tmp = newCoeff; newCoeff = tmp - lIt.value() * uIt.value(); ++lIt; ++uIt; } #endif m_lu.coeffRefUpper(rrow, col) = newCoeff; } //Diag matrix update typename MatrixType::InnerLowerIterator lIt(m_lu, row); typename MatrixType::InnerUpperIterator uIt(m_lu, row); const Index offset = lIt.col() - uIt.row(); Index stop = offset > 0 ? lIt.size() : uIt.size(); #ifdef VECTORIZE Map rowVal(lIt.valuePtr() + (offset > 0 ? 0 : -offset), stop); Map colVal(uIt.valuePtr() + (offset > 0 ? offset : 0), stop); Scalar newCoeff = m_lu.coeffDiag(row) - rowVal.dot(colVal); #else if (offset > 0) //Skip zero value of lIt uIt += offset; else //Skip zero values of uIt lIt += -offset; Scalar newCoeff = m_lu.coeffDiag(row); for (Index k = 0; k < stop; ++k) { const Scalar tmp = newCoeff; newCoeff = tmp - lIt.value() * uIt.value(); ++lIt; ++uIt; } #endif m_lu.coeffRefDiag(row) = newCoeff; } } /** Computes *x = U^-1 L^-1 b * * If \a transpose is set to SvTranspose or SvAdjoint, the solution * of the transposed/adjoint system is computed instead. * * Not all backends implement the solution of the transposed or * adjoint system. 
*/ template template bool SkylineInplaceLU::solve(const MatrixBase &b, MatrixBase* x, const int transposed) const { const size_t rows = m_lu.rows(); const size_t cols = m_lu.cols(); for (Index row = 0; row < rows; row++) { x->coeffRef(row) = b.coeff(row); Scalar newVal = x->coeff(row); typename MatrixType::InnerLowerIterator lIt(m_lu, row); Index col = lIt.col(); while (lIt.col() < row) { newVal -= x->coeff(col++) * lIt.value(); ++lIt; } x->coeffRef(row) = newVal; } for (Index col = rows - 1; col > 0; col--) { x->coeffRef(col) = x->coeff(col) / m_lu.coeffDiag(col); const Scalar x_col = x->coeff(col); typename MatrixType::InnerUpperIterator uIt(m_lu, col); uIt += uIt.size()-1; while (uIt) { x->coeffRef(uIt.row()) -= x_col * uIt.value(); //TODO : introduce --operator uIt += -1; } } x->coeffRef(0) = x->coeff(0) / m_lu.coeffDiag(0); return true; } } // end namespace Eigen #endif // EIGEN_SKYLINELU_H RcppEigen/inst/include/unsupported/Eigen/src/Skyline/SkylineMatrixBase.h0000644000176200001440000001710112253717461026131 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Guillaume Saupin // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SKYLINEMATRIXBASE_H #define EIGEN_SKYLINEMATRIXBASE_H #include "SkylineUtil.h" namespace Eigen { /** \ingroup Skyline_Module * * \class SkylineMatrixBase * * \brief Base class of any skyline matrices or skyline expressions * * \param Derived * */ template class SkylineMatrixBase : public EigenBase { public: typedef typename internal::traits::Scalar Scalar; typedef typename internal::traits::StorageKind StorageKind; typedef typename internal::index::type Index; enum { RowsAtCompileTime = internal::traits::RowsAtCompileTime, /**< The number of rows at compile-time. This is just a copy of the value provided * by the \a Derived type. If a value is not known at compile-time, * it is set to the \a Dynamic constant. * \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */ ColsAtCompileTime = internal::traits::ColsAtCompileTime, /**< The number of columns at compile-time. This is just a copy of the value provided * by the \a Derived type. If a value is not known at compile-time, * it is set to the \a Dynamic constant. * \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */ SizeAtCompileTime = (internal::size_at_compile_time::RowsAtCompileTime, internal::traits::ColsAtCompileTime>::ret), /**< This is equal to the number of coefficients, i.e. the number of * rows times the number of columns, or to \a Dynamic if this is not * known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */ MaxRowsAtCompileTime = RowsAtCompileTime, MaxColsAtCompileTime = ColsAtCompileTime, MaxSizeAtCompileTime = (internal::size_at_compile_time::ret), IsVectorAtCompileTime = RowsAtCompileTime == 1 || ColsAtCompileTime == 1, /**< This is set to true if either the number of rows or the number of * columns is known at compile-time to be equal to 1. Indeed, in that case, * we are dealing with a column-vector (if there is only one column) or with * a row-vector (if there is only one row). */ Flags = internal::traits::Flags, /**< This stores expression \ref flags flags which may or may not be inherited by new expressions * constructed from this one. See the \ref flags "list of flags". 
*/ CoeffReadCost = internal::traits::CoeffReadCost, /**< This is a rough measure of how expensive it is to read one coefficient from * this expression. */ IsRowMajor = Flags & RowMajorBit ? 1 : 0 }; #ifndef EIGEN_PARSED_BY_DOXYGEN /** This is the "real scalar" type; if the \a Scalar type is already real numbers * (e.g. int, float or double) then \a RealScalar is just the same as \a Scalar. If * \a Scalar is \a std::complex then RealScalar is \a T. * * \sa class NumTraits */ typedef typename NumTraits::Real RealScalar; /** type of the equivalent square matrix */ typedef Matrix SquareMatrixType; inline const Derived& derived() const { return *static_cast (this); } inline Derived& derived() { return *static_cast (this); } inline Derived& const_cast_derived() const { return *static_cast (const_cast (this)); } #endif // not EIGEN_PARSED_BY_DOXYGEN /** \returns the number of rows. \sa cols(), RowsAtCompileTime */ inline Index rows() const { return derived().rows(); } /** \returns the number of columns. \sa rows(), ColsAtCompileTime*/ inline Index cols() const { return derived().cols(); } /** \returns the number of coefficients, which is \a rows()*cols(). * \sa rows(), cols(), SizeAtCompileTime. */ inline Index size() const { return rows() * cols(); } /** \returns the number of nonzero coefficients which is in practice the number * of stored coefficients. */ inline Index nonZeros() const { return derived().nonZeros(); } /** \returns the size of the storage major dimension, * i.e., the number of columns for a columns major matrix, and the number of rows otherwise */ Index outerSize() const { return (int(Flags) & RowMajorBit) ? this->rows() : this->cols(); } /** \returns the size of the inner dimension according to the storage order, * i.e., the number of rows for a columns major matrix, and the number of cols otherwise */ Index innerSize() const { return (int(Flags) & RowMajorBit) ? this->cols() : this->rows(); } bool isRValue() const { return m_isRValue; } Derived& markAsRValue() { m_isRValue = true; return derived(); } SkylineMatrixBase() : m_isRValue(false) { /* TODO check flags */ } inline Derived & operator=(const Derived& other) { this->operator= (other); return derived(); } template inline void assignGeneric(const OtherDerived& other) { derived().resize(other.rows(), other.cols()); for (Index row = 0; row < rows(); row++) for (Index col = 0; col < cols(); col++) { if (other.coeff(row, col) != Scalar(0)) derived().insert(row, col) = other.coeff(row, col); } derived().finalize(); } template inline Derived & operator=(const SkylineMatrixBase& other) { //TODO } template inline Derived & operator=(const SkylineProduct& product); friend std::ostream & operator <<(std::ostream & s, const SkylineMatrixBase& m) { s << m.derived(); return s; } template const typename SkylineProductReturnType::Type operator*(const MatrixBase &other) const; /** \internal use operator= */ template void evalTo(MatrixBase& dst) const { dst.setZero(); for (Index i = 0; i < rows(); i++) for (Index j = 0; j < rows(); j++) dst(i, j) = derived().coeff(i, j); } Matrix toDense() const { return derived(); } /** \returns the matrix or vector obtained by evaluating this expression. * * Notice that in the case of a plain matrix or vector (not an expression) this function just returns * a const reference, in order to avoid a useless copy. 
*/ EIGEN_STRONG_INLINE const typename internal::eval::type eval() const { return typename internal::eval::type(derived()); } protected: bool m_isRValue; }; } // end namespace Eigen #endif // EIGEN_SkylineMatrixBase_H RcppEigen/inst/include/unsupported/Eigen/src/Skyline/SkylineStorage.h0000644000176200001440000001744112253717461025505 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Guillaume Saupin // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SKYLINE_STORAGE_H #define EIGEN_SKYLINE_STORAGE_H namespace Eigen { /** Stores a skyline set of values in three structures : * The diagonal elements * The upper elements * The lower elements * */ template class SkylineStorage { typedef typename NumTraits::Real RealScalar; typedef SparseIndex Index; public: SkylineStorage() : m_diag(0), m_lower(0), m_upper(0), m_lowerProfile(0), m_upperProfile(0), m_diagSize(0), m_upperSize(0), m_lowerSize(0), m_upperProfileSize(0), m_lowerProfileSize(0), m_allocatedSize(0) { } SkylineStorage(const SkylineStorage& other) : m_diag(0), m_lower(0), m_upper(0), m_lowerProfile(0), m_upperProfile(0), m_diagSize(0), m_upperSize(0), m_lowerSize(0), m_upperProfileSize(0), m_lowerProfileSize(0), m_allocatedSize(0) { *this = other; } SkylineStorage & operator=(const SkylineStorage& other) { resize(other.diagSize(), other.m_upperProfileSize, other.m_lowerProfileSize, other.upperSize(), other.lowerSize()); memcpy(m_diag, other.m_diag, m_diagSize * sizeof (Scalar)); memcpy(m_upper, other.m_upper, other.upperSize() * sizeof (Scalar)); memcpy(m_lower, other.m_lower, other.lowerSize() * sizeof (Scalar)); memcpy(m_upperProfile, other.m_upperProfile, m_upperProfileSize * sizeof (Index)); memcpy(m_lowerProfile, other.m_lowerProfile, m_lowerProfileSize * sizeof (Index)); return *this; } void swap(SkylineStorage& other) { std::swap(m_diag, other.m_diag); std::swap(m_upper, other.m_upper); std::swap(m_lower, other.m_lower); std::swap(m_upperProfile, other.m_upperProfile); std::swap(m_lowerProfile, other.m_lowerProfile); std::swap(m_diagSize, other.m_diagSize); std::swap(m_upperSize, other.m_upperSize); std::swap(m_lowerSize, other.m_lowerSize); std::swap(m_allocatedSize, other.m_allocatedSize); } ~SkylineStorage() { delete[] m_diag; delete[] m_upper; if (m_upper != m_lower) delete[] m_lower; delete[] m_upperProfile; delete[] m_lowerProfile; } void reserve(Index size, Index upperProfileSize, Index lowerProfileSize, Index upperSize, Index lowerSize) { Index newAllocatedSize = size + upperSize + lowerSize; if (newAllocatedSize > m_allocatedSize) reallocate(size, upperProfileSize, lowerProfileSize, upperSize, lowerSize); } void squeeze() { if (m_allocatedSize > m_diagSize + m_upperSize + m_lowerSize) reallocate(m_diagSize, m_upperProfileSize, m_lowerProfileSize, m_upperSize, m_lowerSize); } void resize(Index diagSize, Index upperProfileSize, Index lowerProfileSize, Index upperSize, Index lowerSize, float reserveSizeFactor = 0) { if (m_allocatedSize < diagSize + upperSize + lowerSize) reallocate(diagSize, upperProfileSize, lowerProfileSize, upperSize + Index(reserveSizeFactor * upperSize), lowerSize + Index(reserveSizeFactor * lowerSize)); m_diagSize = diagSize; m_upperSize = upperSize; m_lowerSize = lowerSize; m_upperProfileSize = upperProfileSize; m_lowerProfileSize = lowerProfileSize; } 
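    // Note on the sizing functions above: reserve() reallocates only when the requested total
    // (diag + upper + lower) exceeds the current allocation, squeeze() shrinks the allocation
    // back to the sizes actually in use, and resize() updates the logical sizes, reallocating
    // (with optional slack controlled by reserveSizeFactor) only when the current allocation
    // is too small.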
inline Index diagSize() const { return m_diagSize; } inline Index upperSize() const { return m_upperSize; } inline Index lowerSize() const { return m_lowerSize; } inline Index upperProfileSize() const { return m_upperProfileSize; } inline Index lowerProfileSize() const { return m_lowerProfileSize; } inline Index allocatedSize() const { return m_allocatedSize; } inline void clear() { m_diagSize = 0; } inline Scalar& diag(Index i) { return m_diag[i]; } inline const Scalar& diag(Index i) const { return m_diag[i]; } inline Scalar& upper(Index i) { return m_upper[i]; } inline const Scalar& upper(Index i) const { return m_upper[i]; } inline Scalar& lower(Index i) { return m_lower[i]; } inline const Scalar& lower(Index i) const { return m_lower[i]; } inline Index& upperProfile(Index i) { return m_upperProfile[i]; } inline const Index& upperProfile(Index i) const { return m_upperProfile[i]; } inline Index& lowerProfile(Index i) { return m_lowerProfile[i]; } inline const Index& lowerProfile(Index i) const { return m_lowerProfile[i]; } static SkylineStorage Map(Index* upperProfile, Index* lowerProfile, Scalar* diag, Scalar* upper, Scalar* lower, Index size, Index upperSize, Index lowerSize) { SkylineStorage res; res.m_upperProfile = upperProfile; res.m_lowerProfile = lowerProfile; res.m_diag = diag; res.m_upper = upper; res.m_lower = lower; res.m_allocatedSize = res.m_diagSize = size; res.m_upperSize = upperSize; res.m_lowerSize = lowerSize; return res; } inline void reset() { memset(m_diag, 0, m_diagSize * sizeof (Scalar)); memset(m_upper, 0, m_upperSize * sizeof (Scalar)); memset(m_lower, 0, m_lowerSize * sizeof (Scalar)); memset(m_upperProfile, 0, m_diagSize * sizeof (Index)); memset(m_lowerProfile, 0, m_diagSize * sizeof (Index)); } void prune(Scalar reference, RealScalar epsilon = dummy_precision()) { //TODO } protected: inline void reallocate(Index diagSize, Index upperProfileSize, Index lowerProfileSize, Index upperSize, Index lowerSize) { Scalar* diag = new Scalar[diagSize]; Scalar* upper = new Scalar[upperSize]; Scalar* lower = new Scalar[lowerSize]; Index* upperProfile = new Index[upperProfileSize]; Index* lowerProfile = new Index[lowerProfileSize]; Index copyDiagSize = (std::min)(diagSize, m_diagSize); Index copyUpperSize = (std::min)(upperSize, m_upperSize); Index copyLowerSize = (std::min)(lowerSize, m_lowerSize); Index copyUpperProfileSize = (std::min)(upperProfileSize, m_upperProfileSize); Index copyLowerProfileSize = (std::min)(lowerProfileSize, m_lowerProfileSize); // copy memcpy(diag, m_diag, copyDiagSize * sizeof (Scalar)); memcpy(upper, m_upper, copyUpperSize * sizeof (Scalar)); memcpy(lower, m_lower, copyLowerSize * sizeof (Scalar)); memcpy(upperProfile, m_upperProfile, copyUpperProfileSize * sizeof (Index)); memcpy(lowerProfile, m_lowerProfile, copyLowerProfileSize * sizeof (Index)); // delete old stuff delete[] m_diag; delete[] m_upper; delete[] m_lower; delete[] m_upperProfile; delete[] m_lowerProfile; m_diag = diag; m_upper = upper; m_lower = lower; m_upperProfile = upperProfile; m_lowerProfile = lowerProfile; m_allocatedSize = diagSize + upperSize + lowerSize; m_upperSize = upperSize; m_lowerSize = lowerSize; } public: Scalar* m_diag; Scalar* m_upper; Scalar* m_lower; Index* m_upperProfile; Index* m_lowerProfile; Index m_diagSize; Index m_upperSize; Index m_lowerSize; Index m_upperProfileSize; Index m_lowerProfileSize; Index m_allocatedSize; }; } // end namespace Eigen #endif // EIGEN_COMPRESSED_STORAGE_H 
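//
// A rough end-to-end sketch of how this storage is used through the Skyline module (illustration
// only; it assumes SkylineMatrix exposes a (rows, cols) constructor like the other Eigen matrix
// types, which is not shown in this excerpt):
//
//   const int n = 100;
//   typedef Eigen::SkylineMatrix<double> SkyMat;
//   SkyMat A(n, n);
//   // ... fill A via A.insert(i, j) = value; then call A.finalize() ...
//   Eigen::SkylineInplaceLU<SkyMat> lu(A);   // the factorization runs in the constructor
//   Eigen::VectorXd x(n), b(n);
//   if (lu.succeeded())
//     lu.solve(b, &x);                       // solves A x = b with the in-place factors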
RcppEigen/inst/include/unsupported/Eigen/src/Skyline/SkylineMatrix.h0000644000176200001440000007453112253717461025350 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Guillaume Saupin // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SKYLINEMATRIX_H #define EIGEN_SKYLINEMATRIX_H #include "SkylineStorage.h" #include "SkylineMatrixBase.h" namespace Eigen { /** \ingroup Skyline_Module * * \class SkylineMatrix * * \brief The main skyline matrix class * * This class implements a skyline matrix using the very uncommon storage * scheme. * * \param _Scalar the scalar type, i.e. the type of the coefficients * \param _Options Union of bit flags controlling the storage scheme. Currently the only possibility * is RowMajor. The default is 0 which means column-major. * * */ namespace internal { template struct traits > { typedef _Scalar Scalar; typedef Sparse StorageKind; enum { RowsAtCompileTime = Dynamic, ColsAtCompileTime = Dynamic, MaxRowsAtCompileTime = Dynamic, MaxColsAtCompileTime = Dynamic, Flags = SkylineBit | _Options, CoeffReadCost = NumTraits::ReadCost, }; }; } template class SkylineMatrix : public SkylineMatrixBase > { public: EIGEN_SKYLINE_GENERIC_PUBLIC_INTERFACE(SkylineMatrix) EIGEN_SKYLINE_INHERIT_ASSIGNMENT_OPERATOR(SkylineMatrix, +=) EIGEN_SKYLINE_INHERIT_ASSIGNMENT_OPERATOR(SkylineMatrix, -=) using Base::IsRowMajor; protected: typedef SkylineMatrix TransposedSkylineMatrix; Index m_outerSize; Index m_innerSize; public: Index* m_colStartIndex; Index* m_rowStartIndex; SkylineStorage m_data; public: inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; } inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; } inline Index innerSize() const { return m_innerSize; } inline Index outerSize() const { return m_outerSize; } inline Index upperNonZeros() const { return m_data.upperSize(); } inline Index lowerNonZeros() const { return m_data.lowerSize(); } inline Index upperNonZeros(Index j) const { return m_colStartIndex[j + 1] - m_colStartIndex[j]; } inline Index lowerNonZeros(Index j) const { return m_rowStartIndex[j + 1] - m_rowStartIndex[j]; } inline const Scalar* _diagPtr() const { return &m_data.diag(0); } inline Scalar* _diagPtr() { return &m_data.diag(0); } inline const Scalar* _upperPtr() const { return &m_data.upper(0); } inline Scalar* _upperPtr() { return &m_data.upper(0); } inline const Scalar* _lowerPtr() const { return &m_data.lower(0); } inline Scalar* _lowerPtr() { return &m_data.lower(0); } inline const Index* _upperProfilePtr() const { return &m_data.upperProfile(0); } inline Index* _upperProfilePtr() { return &m_data.upperProfile(0); } inline const Index* _lowerProfilePtr() const { return &m_data.lowerProfile(0); } inline Index* _lowerProfilePtr() { return &m_data.lowerProfile(0); } inline Scalar coeff(Index row, Index col) const { const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? 
col : row; eigen_assert(outer < outerSize()); eigen_assert(inner < innerSize()); if (outer == inner) return this->m_data.diag(outer); if (IsRowMajor) { if (inner > outer) //upper matrix { const Index minOuterIndex = inner - m_data.upperProfile(inner); if (outer >= minOuterIndex) return this->m_data.upper(m_colStartIndex[inner] + outer - (inner - m_data.upperProfile(inner))); else return Scalar(0); } if (inner < outer) //lower matrix { const Index minInnerIndex = outer - m_data.lowerProfile(outer); if (inner >= minInnerIndex) return this->m_data.lower(m_rowStartIndex[outer] + inner - (outer - m_data.lowerProfile(outer))); else return Scalar(0); } return m_data.upper(m_colStartIndex[inner] + outer - inner); } else { if (outer > inner) //upper matrix { const Index maxOuterIndex = inner + m_data.upperProfile(inner); if (outer <= maxOuterIndex) return this->m_data.upper(m_colStartIndex[inner] + (outer - inner)); else return Scalar(0); } if (outer < inner) //lower matrix { const Index maxInnerIndex = outer + m_data.lowerProfile(outer); if (inner <= maxInnerIndex) return this->m_data.lower(m_rowStartIndex[outer] + (inner - outer)); else return Scalar(0); } } } inline Scalar& coeffRef(Index row, Index col) { const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? col : row; eigen_assert(outer < outerSize()); eigen_assert(inner < innerSize()); if (outer == inner) return this->m_data.diag(outer); if (IsRowMajor) { if (col > row) //upper matrix { const Index minOuterIndex = inner - m_data.upperProfile(inner); eigen_assert(outer >= minOuterIndex && "you try to acces a coeff that do not exist in the storage"); return this->m_data.upper(m_colStartIndex[inner] + outer - (inner - m_data.upperProfile(inner))); } if (col < row) //lower matrix { const Index minInnerIndex = outer - m_data.lowerProfile(outer); eigen_assert(inner >= minInnerIndex && "you try to acces a coeff that do not exist in the storage"); return this->m_data.lower(m_rowStartIndex[outer] + inner - (outer - m_data.lowerProfile(outer))); } } else { if (outer > inner) //upper matrix { const Index maxOuterIndex = inner + m_data.upperProfile(inner); eigen_assert(outer <= maxOuterIndex && "you try to acces a coeff that do not exist in the storage"); return this->m_data.upper(m_colStartIndex[inner] + (outer - inner)); } if (outer < inner) //lower matrix { const Index maxInnerIndex = outer + m_data.lowerProfile(outer); eigen_assert(inner <= maxInnerIndex && "you try to acces a coeff that do not exist in the storage"); return this->m_data.lower(m_rowStartIndex[outer] + (inner - outer)); } } } inline Scalar coeffDiag(Index idx) const { eigen_assert(idx < outerSize()); eigen_assert(idx < innerSize()); return this->m_data.diag(idx); } inline Scalar coeffLower(Index row, Index col) const { const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? col : row; eigen_assert(outer < outerSize()); eigen_assert(inner < innerSize()); eigen_assert(inner != outer); if (IsRowMajor) { const Index minInnerIndex = outer - m_data.lowerProfile(outer); if (inner >= minInnerIndex) return this->m_data.lower(m_rowStartIndex[outer] + inner - (outer - m_data.lowerProfile(outer))); else return Scalar(0); } else { const Index maxInnerIndex = outer + m_data.lowerProfile(outer); if (inner <= maxInnerIndex) return this->m_data.lower(m_rowStartIndex[outer] + (inner - outer)); else return Scalar(0); } } inline Scalar coeffUpper(Index row, Index col) const { const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? 
col : row; eigen_assert(outer < outerSize()); eigen_assert(inner < innerSize()); eigen_assert(inner != outer); if (IsRowMajor) { const Index minOuterIndex = inner - m_data.upperProfile(inner); if (outer >= minOuterIndex) return this->m_data.upper(m_colStartIndex[inner] + outer - (inner - m_data.upperProfile(inner))); else return Scalar(0); } else { const Index maxOuterIndex = inner + m_data.upperProfile(inner); if (outer <= maxOuterIndex) return this->m_data.upper(m_colStartIndex[inner] + (outer - inner)); else return Scalar(0); } } inline Scalar& coeffRefDiag(Index idx) { eigen_assert(idx < outerSize()); eigen_assert(idx < innerSize()); return this->m_data.diag(idx); } inline Scalar& coeffRefLower(Index row, Index col) { const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? col : row; eigen_assert(outer < outerSize()); eigen_assert(inner < innerSize()); eigen_assert(inner != outer); if (IsRowMajor) { const Index minInnerIndex = outer - m_data.lowerProfile(outer); eigen_assert(inner >= minInnerIndex && "you try to acces a coeff that do not exist in the storage"); return this->m_data.lower(m_rowStartIndex[outer] + inner - (outer - m_data.lowerProfile(outer))); } else { const Index maxInnerIndex = outer + m_data.lowerProfile(outer); eigen_assert(inner <= maxInnerIndex && "you try to acces a coeff that do not exist in the storage"); return this->m_data.lower(m_rowStartIndex[outer] + (inner - outer)); } } inline bool coeffExistLower(Index row, Index col) { const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? col : row; eigen_assert(outer < outerSize()); eigen_assert(inner < innerSize()); eigen_assert(inner != outer); if (IsRowMajor) { const Index minInnerIndex = outer - m_data.lowerProfile(outer); return inner >= minInnerIndex; } else { const Index maxInnerIndex = outer + m_data.lowerProfile(outer); return inner <= maxInnerIndex; } } inline Scalar& coeffRefUpper(Index row, Index col) { const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? col : row; eigen_assert(outer < outerSize()); eigen_assert(inner < innerSize()); eigen_assert(inner != outer); if (IsRowMajor) { const Index minOuterIndex = inner - m_data.upperProfile(inner); eigen_assert(outer >= minOuterIndex && "you try to acces a coeff that do not exist in the storage"); return this->m_data.upper(m_colStartIndex[inner] + outer - (inner - m_data.upperProfile(inner))); } else { const Index maxOuterIndex = inner + m_data.upperProfile(inner); eigen_assert(outer <= maxOuterIndex && "you try to acces a coeff that do not exist in the storage"); return this->m_data.upper(m_colStartIndex[inner] + (outer - inner)); } } inline bool coeffExistUpper(Index row, Index col) { const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? 
col : row; eigen_assert(outer < outerSize()); eigen_assert(inner < innerSize()); eigen_assert(inner != outer); if (IsRowMajor) { const Index minOuterIndex = inner - m_data.upperProfile(inner); return outer >= minOuterIndex; } else { const Index maxOuterIndex = inner + m_data.upperProfile(inner); return outer <= maxOuterIndex; } } protected: public: class InnerUpperIterator; class InnerLowerIterator; class OuterUpperIterator; class OuterLowerIterator; /** Removes all non zeros */ inline void setZero() { m_data.clear(); memset(m_colStartIndex, 0, (m_outerSize + 1) * sizeof (Index)); memset(m_rowStartIndex, 0, (m_outerSize + 1) * sizeof (Index)); } /** \returns the number of non zero coefficients */ inline Index nonZeros() const { return m_data.diagSize() + m_data.upperSize() + m_data.lowerSize(); } /** Preallocates \a reserveSize non zeros */ inline void reserve(Index reserveSize, Index reserveUpperSize, Index reserveLowerSize) { m_data.reserve(reserveSize, reserveUpperSize, reserveLowerSize); } /** \returns a reference to a novel non zero coefficient with coordinates \a row x \a col. * * \warning This function can be extremely slow if the non zero coefficients * are not inserted in a coherent order. * * After an insertion session, you should call the finalize() function. */ EIGEN_DONT_INLINE Scalar & insert(Index row, Index col) { const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? col : row; eigen_assert(outer < outerSize()); eigen_assert(inner < innerSize()); if (outer == inner) return m_data.diag(col); if (IsRowMajor) { if (outer < inner) //upper matrix { Index minOuterIndex = 0; minOuterIndex = inner - m_data.upperProfile(inner); if (outer < minOuterIndex) //The value does not yet exist { const Index previousProfile = m_data.upperProfile(inner); m_data.upperProfile(inner) = inner - outer; const Index bandIncrement = m_data.upperProfile(inner) - previousProfile; //shift data stored after this new one const Index stop = m_colStartIndex[cols()]; const Index start = m_colStartIndex[inner]; for (Index innerIdx = stop; innerIdx >= start; innerIdx--) { m_data.upper(innerIdx + bandIncrement) = m_data.upper(innerIdx); } for (Index innerIdx = cols(); innerIdx > inner; innerIdx--) { m_colStartIndex[innerIdx] += bandIncrement; } //zeros new data memset(this->_upperPtr() + start, 0, (bandIncrement - 1) * sizeof (Scalar)); return m_data.upper(m_colStartIndex[inner]); } else { return m_data.upper(m_colStartIndex[inner] + outer - (inner - m_data.upperProfile(inner))); } } if (outer > inner) //lower matrix { const Index minInnerIndex = outer - m_data.lowerProfile(outer); if (inner < minInnerIndex) //The value does not yet exist { const Index previousProfile = m_data.lowerProfile(outer); m_data.lowerProfile(outer) = outer - inner; const Index bandIncrement = m_data.lowerProfile(outer) - previousProfile; //shift data stored after this new one const Index stop = m_rowStartIndex[rows()]; const Index start = m_rowStartIndex[outer]; for (Index innerIdx = stop; innerIdx >= start; innerIdx--) { m_data.lower(innerIdx + bandIncrement) = m_data.lower(innerIdx); } for (Index innerIdx = rows(); innerIdx > outer; innerIdx--) { m_rowStartIndex[innerIdx] += bandIncrement; } //zeros new data memset(this->_lowerPtr() + start, 0, (bandIncrement - 1) * sizeof (Scalar)); return m_data.lower(m_rowStartIndex[outer]); } else { return m_data.lower(m_rowStartIndex[outer] + inner - (outer - m_data.lowerProfile(outer))); } } } else { if (outer > inner) //upper matrix { const Index maxOuterIndex = inner + 
m_data.upperProfile(inner); if (outer > maxOuterIndex) //The value does not yet exist { const Index previousProfile = m_data.upperProfile(inner); m_data.upperProfile(inner) = outer - inner; const Index bandIncrement = m_data.upperProfile(inner) - previousProfile; //shift data stored after this new one const Index stop = m_rowStartIndex[rows()]; const Index start = m_rowStartIndex[inner + 1]; for (Index innerIdx = stop; innerIdx >= start; innerIdx--) { m_data.upper(innerIdx + bandIncrement) = m_data.upper(innerIdx); } for (Index innerIdx = inner + 1; innerIdx < outerSize() + 1; innerIdx++) { m_rowStartIndex[innerIdx] += bandIncrement; } memset(this->_upperPtr() + m_rowStartIndex[inner] + previousProfile + 1, 0, (bandIncrement - 1) * sizeof (Scalar)); return m_data.upper(m_rowStartIndex[inner] + m_data.upperProfile(inner)); } else { return m_data.upper(m_rowStartIndex[inner] + (outer - inner)); } } if (outer < inner) //lower matrix { const Index maxInnerIndex = outer + m_data.lowerProfile(outer); if (inner > maxInnerIndex) //The value does not yet exist { const Index previousProfile = m_data.lowerProfile(outer); m_data.lowerProfile(outer) = inner - outer; const Index bandIncrement = m_data.lowerProfile(outer) - previousProfile; //shift data stored after this new one const Index stop = m_colStartIndex[cols()]; const Index start = m_colStartIndex[outer + 1]; for (Index innerIdx = stop; innerIdx >= start; innerIdx--) { m_data.lower(innerIdx + bandIncrement) = m_data.lower(innerIdx); } for (Index innerIdx = outer + 1; innerIdx < outerSize() + 1; innerIdx++) { m_colStartIndex[innerIdx] += bandIncrement; } memset(this->_lowerPtr() + m_colStartIndex[outer] + previousProfile + 1, 0, (bandIncrement - 1) * sizeof (Scalar)); return m_data.lower(m_colStartIndex[outer] + m_data.lowerProfile(outer)); } else { return m_data.lower(m_colStartIndex[outer] + (inner - outer)); } } } } /** Must be called after inserting a set of non zero entries. 
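 *
 * A minimal sketch of the intended workflow, assuming only the SkylineMatrix interface declared above:
 * \code
 * SkylineMatrix<double> m(4, 4);   // skyline storage is meant for square matrices (resize() asserts this)
 * m.insert(1, 0) = 2.0;            // below the diagonal: grows the lower profile if needed
 * m.insert(0, 2) = 3.0;            // above the diagonal: grows the upper profile if needed
 * m.finalize();                    // compact the storage once the insertion session is over
 * \endcode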
*/ inline void finalize() { if (IsRowMajor) { if (rows() > cols()) m_data.resize(cols(), cols(), rows(), m_colStartIndex[cols()] + 1, m_rowStartIndex[rows()] + 1); else m_data.resize(rows(), cols(), rows(), m_colStartIndex[cols()] + 1, m_rowStartIndex[rows()] + 1); // eigen_assert(rows() == cols() && "memory reorganisatrion only works with suare matrix"); // // Scalar* newArray = new Scalar[m_colStartIndex[cols()] + 1 + m_rowStartIndex[rows()] + 1]; // Index dataIdx = 0; // for (Index row = 0; row < rows(); row++) { // // const Index nbLowerElts = m_rowStartIndex[row + 1] - m_rowStartIndex[row]; // // std::cout << "nbLowerElts" << nbLowerElts << std::endl; // memcpy(newArray + dataIdx, m_data.m_lower + m_rowStartIndex[row], nbLowerElts * sizeof (Scalar)); // m_rowStartIndex[row] = dataIdx; // dataIdx += nbLowerElts; // // const Index nbUpperElts = m_colStartIndex[row + 1] - m_colStartIndex[row]; // memcpy(newArray + dataIdx, m_data.m_upper + m_colStartIndex[row], nbUpperElts * sizeof (Scalar)); // m_colStartIndex[row] = dataIdx; // dataIdx += nbUpperElts; // // // } // //todo : don't access m_data profile directly : add an accessor from SkylineMatrix // m_rowStartIndex[rows()] = m_rowStartIndex[rows()-1] + m_data.lowerProfile(rows()-1); // m_colStartIndex[cols()] = m_colStartIndex[cols()-1] + m_data.upperProfile(cols()-1); // // delete[] m_data.m_lower; // delete[] m_data.m_upper; // // m_data.m_lower = newArray; // m_data.m_upper = newArray; } else { if (rows() > cols()) m_data.resize(cols(), rows(), cols(), m_rowStartIndex[cols()] + 1, m_colStartIndex[cols()] + 1); else m_data.resize(rows(), rows(), cols(), m_rowStartIndex[rows()] + 1, m_colStartIndex[rows()] + 1); } } inline void squeeze() { finalize(); m_data.squeeze(); } void prune(Scalar reference, RealScalar epsilon = dummy_precision ()) { //TODO } /** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero * \sa resizeNonZeros(Index), reserve(), setZero() */ void resize(size_t rows, size_t cols) { const Index diagSize = rows > cols ? cols : rows; m_innerSize = IsRowMajor ? cols : rows; eigen_assert(rows == cols && "Skyline matrix must be square matrix"); if (diagSize % 2) { // diagSize is odd const Index k = (diagSize - 1) / 2; m_data.resize(diagSize, IsRowMajor ? cols : rows, IsRowMajor ? rows : cols, 2 * k * k + k + 1, 2 * k * k + k + 1); } else // diagSize is even { const Index k = diagSize / 2; m_data.resize(diagSize, IsRowMajor ? cols : rows, IsRowMajor ? 
rows : cols, 2 * k * k - k + 1, 2 * k * k - k + 1); } if (m_colStartIndex && m_rowStartIndex) { delete[] m_colStartIndex; delete[] m_rowStartIndex; } m_colStartIndex = new Index [cols + 1]; m_rowStartIndex = new Index [rows + 1]; m_outerSize = diagSize; m_data.reset(); m_data.clear(); m_outerSize = diagSize; memset(m_colStartIndex, 0, (cols + 1) * sizeof (Index)); memset(m_rowStartIndex, 0, (rows + 1) * sizeof (Index)); } void resizeNonZeros(Index size) { m_data.resize(size); } inline SkylineMatrix() : m_outerSize(-1), m_innerSize(0), m_colStartIndex(0), m_rowStartIndex(0) { resize(0, 0); } inline SkylineMatrix(size_t rows, size_t cols) : m_outerSize(0), m_innerSize(0), m_colStartIndex(0), m_rowStartIndex(0) { resize(rows, cols); } template inline SkylineMatrix(const SkylineMatrixBase& other) : m_outerSize(0), m_innerSize(0), m_colStartIndex(0), m_rowStartIndex(0) { *this = other.derived(); } inline SkylineMatrix(const SkylineMatrix & other) : Base(), m_outerSize(0), m_innerSize(0), m_colStartIndex(0), m_rowStartIndex(0) { *this = other.derived(); } inline void swap(SkylineMatrix & other) { //EIGEN_DBG_SKYLINE(std::cout << "SkylineMatrix:: swap\n"); std::swap(m_colStartIndex, other.m_colStartIndex); std::swap(m_rowStartIndex, other.m_rowStartIndex); std::swap(m_innerSize, other.m_innerSize); std::swap(m_outerSize, other.m_outerSize); m_data.swap(other.m_data); } inline SkylineMatrix & operator=(const SkylineMatrix & other) { std::cout << "SkylineMatrix& operator=(const SkylineMatrix& other)\n"; if (other.isRValue()) { swap(other.const_cast_derived()); } else { resize(other.rows(), other.cols()); memcpy(m_colStartIndex, other.m_colStartIndex, (m_outerSize + 1) * sizeof (Index)); memcpy(m_rowStartIndex, other.m_rowStartIndex, (m_outerSize + 1) * sizeof (Index)); m_data = other.m_data; } return *this; } template inline SkylineMatrix & operator=(const SkylineMatrixBase& other) { const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit); if (needToTranspose) { // TODO // return *this; } else { // there is no special optimization return SkylineMatrixBase::operator=(other.derived()); } } friend std::ostream & operator <<(std::ostream & s, const SkylineMatrix & m) { EIGEN_DBG_SKYLINE( std::cout << "upper elements : " << std::endl; for (Index i = 0; i < m.m_data.upperSize(); i++) std::cout << m.m_data.upper(i) << "\t"; std::cout << std::endl; std::cout << "upper profile : " << std::endl; for (Index i = 0; i < m.m_data.upperProfileSize(); i++) std::cout << m.m_data.upperProfile(i) << "\t"; std::cout << std::endl; std::cout << "lower startIdx : " << std::endl; for (Index i = 0; i < m.m_data.upperProfileSize(); i++) std::cout << (IsRowMajor ? m.m_colStartIndex[i] : m.m_rowStartIndex[i]) << "\t"; std::cout << std::endl; std::cout << "lower elements : " << std::endl; for (Index i = 0; i < m.m_data.lowerSize(); i++) std::cout << m.m_data.lower(i) << "\t"; std::cout << std::endl; std::cout << "lower profile : " << std::endl; for (Index i = 0; i < m.m_data.lowerProfileSize(); i++) std::cout << m.m_data.lowerProfile(i) << "\t"; std::cout << std::endl; std::cout << "lower startIdx : " << std::endl; for (Index i = 0; i < m.m_data.lowerProfileSize(); i++) std::cout << (IsRowMajor ? 
m.m_rowStartIndex[i] : m.m_colStartIndex[i]) << "\t"; std::cout << std::endl; ); for (Index rowIdx = 0; rowIdx < m.rows(); rowIdx++) { for (Index colIdx = 0; colIdx < m.cols(); colIdx++) { s << m.coeff(rowIdx, colIdx) << "\t"; } s << std::endl; } return s; } /** Destructor */ inline ~SkylineMatrix() { delete[] m_colStartIndex; delete[] m_rowStartIndex; } /** Overloaded for performance */ Scalar sum() const; }; template class SkylineMatrix::InnerUpperIterator { public: InnerUpperIterator(const SkylineMatrix& mat, Index outer) : m_matrix(mat), m_outer(outer), m_id(_Options == RowMajor ? mat.m_colStartIndex[outer] : mat.m_rowStartIndex[outer] + 1), m_start(m_id), m_end(_Options == RowMajor ? mat.m_colStartIndex[outer + 1] : mat.m_rowStartIndex[outer + 1] + 1) { } inline InnerUpperIterator & operator++() { m_id++; return *this; } inline InnerUpperIterator & operator+=(Index shift) { m_id += shift; return *this; } inline Scalar value() const { return m_matrix.m_data.upper(m_id); } inline Scalar* valuePtr() { return const_cast (&(m_matrix.m_data.upper(m_id))); } inline Scalar& valueRef() { return const_cast (m_matrix.m_data.upper(m_id)); } inline Index index() const { return IsRowMajor ? m_outer - m_matrix.m_data.upperProfile(m_outer) + (m_id - m_start) : m_outer + (m_id - m_start) + 1; } inline Index row() const { return IsRowMajor ? index() : m_outer; } inline Index col() const { return IsRowMajor ? m_outer : index(); } inline size_t size() const { return m_matrix.m_data.upperProfile(m_outer); } inline operator bool() const { return (m_id < m_end) && (m_id >= m_start); } protected: const SkylineMatrix& m_matrix; const Index m_outer; Index m_id; const Index m_start; const Index m_end; }; template class SkylineMatrix::InnerLowerIterator { public: InnerLowerIterator(const SkylineMatrix& mat, Index outer) : m_matrix(mat), m_outer(outer), m_id(_Options == RowMajor ? mat.m_rowStartIndex[outer] : mat.m_colStartIndex[outer] + 1), m_start(m_id), m_end(_Options == RowMajor ? mat.m_rowStartIndex[outer + 1] : mat.m_colStartIndex[outer + 1] + 1) { } inline InnerLowerIterator & operator++() { m_id++; return *this; } inline InnerLowerIterator & operator+=(Index shift) { m_id += shift; return *this; } inline Scalar value() const { return m_matrix.m_data.lower(m_id); } inline Scalar* valuePtr() { return const_cast (&(m_matrix.m_data.lower(m_id))); } inline Scalar& valueRef() { return const_cast (m_matrix.m_data.lower(m_id)); } inline Index index() const { return IsRowMajor ? m_outer - m_matrix.m_data.lowerProfile(m_outer) + (m_id - m_start) : m_outer + (m_id - m_start) + 1; ; } inline Index row() const { return IsRowMajor ? m_outer : index(); } inline Index col() const { return IsRowMajor ? index() : m_outer; } inline size_t size() const { return m_matrix.m_data.lowerProfile(m_outer); } inline operator bool() const { return (m_id < m_end) && (m_id >= m_start); } protected: const SkylineMatrix& m_matrix; const Index m_outer; Index m_id; const Index m_start; const Index m_end; }; } // end namespace Eigen #endif // EIGEN_SkylineMatrix_H RcppEigen/inst/include/unsupported/Eigen/src/Skyline/SkylineUtil.h0000644000176200001440000000612112253717461025007 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Guillaume Saupin // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_SKYLINEUTIL_H #define EIGEN_SKYLINEUTIL_H namespace Eigen { #ifdef NDEBUG #define EIGEN_DBG_SKYLINE(X) #else #define EIGEN_DBG_SKYLINE(X) X #endif const unsigned int SkylineBit = 0x1200; template class SkylineProduct; enum AdditionalProductEvaluationMode {SkylineTimeDenseProduct, SkylineTimeSkylineProduct, DenseTimeSkylineProduct}; enum {IsSkyline = SkylineBit}; #define EIGEN_SKYLINE_INHERIT_ASSIGNMENT_OPERATOR(Derived, Op) \ template \ EIGEN_STRONG_INLINE Derived& operator Op(const Eigen::SkylineMatrixBase& other) \ { \ return Base::operator Op(other.derived()); \ } \ EIGEN_STRONG_INLINE Derived& operator Op(const Derived& other) \ { \ return Base::operator Op(other); \ } #define EIGEN_SKYLINE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, Op) \ template \ EIGEN_STRONG_INLINE Derived& operator Op(const Other& scalar) \ { \ return Base::operator Op(scalar); \ } #define EIGEN_SKYLINE_INHERIT_ASSIGNMENT_OPERATORS(Derived) \ EIGEN_SKYLINE_INHERIT_ASSIGNMENT_OPERATOR(Derived, =) \ EIGEN_SKYLINE_INHERIT_ASSIGNMENT_OPERATOR(Derived, +=) \ EIGEN_SKYLINE_INHERIT_ASSIGNMENT_OPERATOR(Derived, -=) \ EIGEN_SKYLINE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, *=) \ EIGEN_SKYLINE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, /=) #define _EIGEN_SKYLINE_GENERIC_PUBLIC_INTERFACE(Derived, BaseClass) \ typedef BaseClass Base; \ typedef typename Eigen::internal::traits::Scalar Scalar; \ typedef typename Eigen::NumTraits::Real RealScalar; \ typedef typename Eigen::internal::traits::StorageKind StorageKind; \ typedef typename Eigen::internal::index::type Index; \ enum { Flags = Eigen::internal::traits::Flags, }; #define EIGEN_SKYLINE_GENERIC_PUBLIC_INTERFACE(Derived) \ _EIGEN_SKYLINE_GENERIC_PUBLIC_INTERFACE(Derived, Eigen::SkylineMatrixBase) template class SkylineMatrixBase; template class SkylineMatrix; template class DynamicSkylineMatrix; template class SkylineVector; template class MappedSkylineMatrix; namespace internal { template struct skyline_product_mode; template::value> struct SkylineProductReturnType; template class eval { typedef typename traits::Scalar _Scalar; enum { _Flags = traits::Flags }; public: typedef SkylineMatrix<_Scalar, _Flags> type; }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_SKYLINEUTIL_H RcppEigen/inst/include/unsupported/Eigen/src/SVD/0000755000176200001440000000000012253717461021400 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/src/SVD/doneInBDCSVD.txt0000644000176200001440000000204612253717461024245 0ustar liggesusersThis unsupported package is about a divide and conquer algorithm to compute SVD. The implementation follows as closely as possible the following reference paper : http://www.cs.yale.edu/publications/techreports/tr933.pdf The code documentation uses the same names for variables as the reference paper. The code, deflation included, is working but there are a few things that could be optimised as explained in the TODOBdsvd. In the code comments were put at the line where would be the third step of the algorithm so one could simply add the call of a function doing the last part of the algorithm and that would not require any knowledge of the part we implemented. In the TODOBdcsvd we explain what is the main difficulty of the last part and suggest a reference paper to help solve it. The implemented has trouble with fixed size matrices. In the actual implementation, it returns matrices of zero when ask to do a svd on an int matrix. 
Paper for the third part: http://www.stat.uchicago.edu/~lekheng/courses/302/classics/greengard-rokhlin.pdf RcppEigen/inst/include/unsupported/Eigen/src/SVD/TODOBdcsvd.txt0000644000176200001440000000351012253717461024033 0ustar liggesusersTO DO LIST (optional optimization) - do all the allocations in the allocate part - support static matrices - return a error at compilation time when using integer matrices (int, long, std::complex, ...) to finish the algorithm : -implement the last part of the algorithm as described on the reference paper. You may find more information on that part on this paper -to replace the call to JacobiSVD at the end of the divide algorithm, just after the call to deflation. (suggested step by step resolution) 0) comment the call to Jacobi in the last part of the divide method and everything right after until the end of the method. What is commented can be a guideline to steps 3) 4) and 6) 1) solve the secular equation (Characteristic equation) on the values that are not null (zi!=0 and di!=0), after the deflation wich should be uncommented in the divide method 2) remember the values of the singular values that are already computed (zi=0) 3) assign the singular values found in m_computed at the right places (with the ones found in step 2) ) in decreasing order 4) set the firstcol to zero (except the first element) in m_computed 5) compute all the singular vectors when CompV is set to true and only the left vectors when CompV is set to false 6) multiply naiveU and naiveV to the right by the matrices found, only naiveU when CompV is set to false, /!\ if CompU is false NaiveU has only 2 rows 7) delete everything commented in step 0) RcppEigen/inst/include/unsupported/Eigen/src/SVD/JacobiSVD.h0000644000176200001440000007665512253717461023340 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009-2010 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_JACOBISVD_H #define EIGEN_JACOBISVD_H namespace Eigen { namespace internal { // forward declaration (needed by ICC) // the empty body is required by MSVC template::IsComplex> struct svd_precondition_2x2_block_to_be_real {}; /*** QR preconditioners (R-SVD) *** *** Their role is to reduce the problem of computing the SVD to the case of a square matrix. *** This approach, known as R-SVD, is an optimization for rectangular-enough matrices, and is a requirement for *** JacobiSVD which by itself is only able to work on square matrices. 
***/ enum { PreconditionIfMoreColsThanRows, PreconditionIfMoreRowsThanCols }; template struct qr_preconditioner_should_do_anything { enum { a = MatrixType::RowsAtCompileTime != Dynamic && MatrixType::ColsAtCompileTime != Dynamic && MatrixType::ColsAtCompileTime <= MatrixType::RowsAtCompileTime, b = MatrixType::RowsAtCompileTime != Dynamic && MatrixType::ColsAtCompileTime != Dynamic && MatrixType::RowsAtCompileTime <= MatrixType::ColsAtCompileTime, ret = !( (QRPreconditioner == NoQRPreconditioner) || (Case == PreconditionIfMoreColsThanRows && bool(a)) || (Case == PreconditionIfMoreRowsThanCols && bool(b)) ) }; }; template::ret > struct qr_preconditioner_impl {}; template class qr_preconditioner_impl { public: typedef typename MatrixType::Index Index; void allocate(const JacobiSVD&) {} bool run(JacobiSVD&, const MatrixType&) { return false; } }; /*** preconditioner using FullPivHouseholderQR ***/ template class qr_preconditioner_impl { public: typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime }; typedef Matrix WorkspaceType; void allocate(const JacobiSVD& svd) { if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols()) { m_qr.~QRType(); ::new (&m_qr) QRType(svd.rows(), svd.cols()); } if (svd.m_computeFullU) m_workspace.resize(svd.rows()); } bool run(JacobiSVD& svd, const MatrixType& matrix) { if(matrix.rows() > matrix.cols()) { m_qr.compute(matrix); svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView(); if(svd.m_computeFullU) m_qr.matrixQ().evalTo(svd.m_matrixU, m_workspace); if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation(); return true; } return false; } private: typedef FullPivHouseholderQR QRType; QRType m_qr; WorkspaceType m_workspace; }; template class qr_preconditioner_impl { public: typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, Options = MatrixType::Options }; typedef Matrix TransposeTypeWithSameStorageOrder; void allocate(const JacobiSVD& svd) { if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols()) { m_qr.~QRType(); ::new (&m_qr) QRType(svd.cols(), svd.rows()); } m_adjoint.resize(svd.cols(), svd.rows()); if (svd.m_computeFullV) m_workspace.resize(svd.cols()); } bool run(JacobiSVD& svd, const MatrixType& matrix) { if(matrix.cols() > matrix.rows()) { m_adjoint = matrix.adjoint(); m_qr.compute(m_adjoint); svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView().adjoint(); if(svd.m_computeFullV) m_qr.matrixQ().evalTo(svd.m_matrixV, m_workspace); if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation(); return true; } else return false; } private: typedef FullPivHouseholderQR QRType; QRType m_qr; TransposeTypeWithSameStorageOrder m_adjoint; typename internal::plain_row_type::type m_workspace; }; /*** preconditioner using ColPivHouseholderQR ***/ template class qr_preconditioner_impl { public: typedef typename MatrixType::Index Index; void allocate(const JacobiSVD& svd) { if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols()) { m_qr.~QRType(); ::new (&m_qr) QRType(svd.rows(), svd.cols()); } if (svd.m_computeFullU) m_workspace.resize(svd.rows()); else if (svd.m_computeThinU) 
m_workspace.resize(svd.cols()); } bool run(JacobiSVD& svd, const MatrixType& matrix) { if(matrix.rows() > matrix.cols()) { m_qr.compute(matrix); svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView(); if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace); else if(svd.m_computeThinU) { svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols()); m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace); } if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation(); return true; } return false; } private: typedef ColPivHouseholderQR QRType; QRType m_qr; typename internal::plain_col_type::type m_workspace; }; template class qr_preconditioner_impl { public: typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, Options = MatrixType::Options }; typedef Matrix TransposeTypeWithSameStorageOrder; void allocate(const JacobiSVD& svd) { if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols()) { m_qr.~QRType(); ::new (&m_qr) QRType(svd.cols(), svd.rows()); } if (svd.m_computeFullV) m_workspace.resize(svd.cols()); else if (svd.m_computeThinV) m_workspace.resize(svd.rows()); m_adjoint.resize(svd.cols(), svd.rows()); } bool run(JacobiSVD& svd, const MatrixType& matrix) { if(matrix.cols() > matrix.rows()) { m_adjoint = matrix.adjoint(); m_qr.compute(m_adjoint); svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView().adjoint(); if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace); else if(svd.m_computeThinV) { svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows()); m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace); } if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation(); return true; } else return false; } private: typedef ColPivHouseholderQR QRType; QRType m_qr; TransposeTypeWithSameStorageOrder m_adjoint; typename internal::plain_row_type::type m_workspace; }; /*** preconditioner using HouseholderQR ***/ template class qr_preconditioner_impl { public: typedef typename MatrixType::Index Index; void allocate(const JacobiSVD& svd) { if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols()) { m_qr.~QRType(); ::new (&m_qr) QRType(svd.rows(), svd.cols()); } if (svd.m_computeFullU) m_workspace.resize(svd.rows()); else if (svd.m_computeThinU) m_workspace.resize(svd.cols()); } bool run(JacobiSVD& svd, const MatrixType& matrix) { if(matrix.rows() > matrix.cols()) { m_qr.compute(matrix); svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView(); if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace); else if(svd.m_computeThinU) { svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols()); m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace); } if(svd.computeV()) svd.m_matrixV.setIdentity(matrix.cols(), matrix.cols()); return true; } return false; } private: typedef HouseholderQR QRType; QRType m_qr; typename internal::plain_col_type::type m_workspace; }; template class qr_preconditioner_impl { public: typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, MaxRowsAtCompileTime = 
MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, Options = MatrixType::Options }; typedef Matrix TransposeTypeWithSameStorageOrder; void allocate(const JacobiSVD& svd) { if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols()) { m_qr.~QRType(); ::new (&m_qr) QRType(svd.cols(), svd.rows()); } if (svd.m_computeFullV) m_workspace.resize(svd.cols()); else if (svd.m_computeThinV) m_workspace.resize(svd.rows()); m_adjoint.resize(svd.cols(), svd.rows()); } bool run(JacobiSVD& svd, const MatrixType& matrix) { if(matrix.cols() > matrix.rows()) { m_adjoint = matrix.adjoint(); m_qr.compute(m_adjoint); svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView().adjoint(); if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace); else if(svd.m_computeThinV) { svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows()); m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace); } if(svd.computeU()) svd.m_matrixU.setIdentity(matrix.rows(), matrix.rows()); return true; } else return false; } private: typedef HouseholderQR QRType; QRType m_qr; TransposeTypeWithSameStorageOrder m_adjoint; typename internal::plain_row_type::type m_workspace; }; /*** 2x2 SVD implementation *** *** JacobiSVD consists in performing a series of 2x2 SVD subproblems ***/ template struct svd_precondition_2x2_block_to_be_real { typedef JacobiSVD SVD; typedef typename SVD::Index Index; static void run(typename SVD::WorkMatrixType&, SVD&, Index, Index) {} }; template struct svd_precondition_2x2_block_to_be_real { typedef JacobiSVD SVD; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename SVD::Index Index; static void run(typename SVD::WorkMatrixType& work_matrix, SVD& svd, Index p, Index q) { using std::sqrt; Scalar z; JacobiRotation rot; RealScalar n = sqrt(numext::abs2(work_matrix.coeff(p,p)) + numext::abs2(work_matrix.coeff(q,p))); if(n==0) { z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q); work_matrix.row(p) *= z; if(svd.computeU()) svd.m_matrixU.col(p) *= conj(z); z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q); work_matrix.row(q) *= z; if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z); } else { rot.c() = conj(work_matrix.coeff(p,p)) / n; rot.s() = work_matrix.coeff(q,p) / n; work_matrix.applyOnTheLeft(p,q,rot); if(svd.computeU()) svd.m_matrixU.applyOnTheRight(p,q,rot.adjoint()); if(work_matrix.coeff(p,q) != Scalar(0)) { Scalar z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q); work_matrix.col(q) *= z; if(svd.computeV()) svd.m_matrixV.col(q) *= z; } if(work_matrix.coeff(q,q) != Scalar(0)) { z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q); work_matrix.row(q) *= z; if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z); } } } }; template void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q, JacobiRotation *j_left, JacobiRotation *j_right) { using std::sqrt; Matrix m; m << numext::real(matrix.coeff(p,p)), numext::real(matrix.coeff(p,q)), numext::real(matrix.coeff(q,p)), numext::real(matrix.coeff(q,q)); JacobiRotation rot1; RealScalar t = m.coeff(0,0) + m.coeff(1,1); RealScalar d = m.coeff(1,0) - m.coeff(0,1); if(t == RealScalar(0)) { rot1.c() = RealScalar(0); rot1.s() = d > RealScalar(0) ? 
RealScalar(1) : RealScalar(-1); } else { RealScalar u = d / t; rot1.c() = RealScalar(1) / sqrt(RealScalar(1) + numext::abs2(u)); rot1.s() = rot1.c() * u; } m.applyOnTheLeft(0,1,rot1); j_right->makeJacobi(m,0,1); *j_left = rot1 * j_right->transpose(); } } // end namespace internal /** \ingroup SVD_Module * * * \class JacobiSVD * * \brief Two-sided Jacobi SVD decomposition of a rectangular matrix * * \param MatrixType the type of the matrix of which we are computing the SVD decomposition * \param QRPreconditioner this optional parameter allows to specify the type of QR decomposition that will be used internally * for the R-SVD step for non-square matrices. See discussion of possible values below. * * SVD decomposition consists in decomposing any n-by-p matrix \a A as a product * \f[ A = U S V^* \f] * where \a U is a n-by-n unitary, \a V is a p-by-p unitary, and \a S is a n-by-p real positive matrix which is zero outside of its main diagonal; * the diagonal entries of S are known as the \em singular \em values of \a A and the columns of \a U and \a V are known as the left * and right \em singular \em vectors of \a A respectively. * * Singular values are always sorted in decreasing order. * * This JacobiSVD decomposition computes only the singular values by default. If you want \a U or \a V, you need to ask for them explicitly. * * You can ask for only \em thin \a U or \a V to be computed, meaning the following. In case of a rectangular n-by-p matrix, letting \a m be the * smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual * singular vectors. Asking for \em thin \a U or \a V means asking for only their \a m first columns to be formed. So \a U is then a n-by-m matrix, * and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving. * * Here's an example demonstrating basic usage: * \include JacobiSVD_basic.cpp * Output: \verbinclude JacobiSVD_basic.out * * This JacobiSVD class is a two-sided Jacobi R-SVD decomposition, ensuring optimal reliability and accuracy. The downside is that it's slower than * bidiagonalizing SVD algorithms for large square matrices; however its complexity is still \f$ O(n^2p) \f$ where \a n is the smaller dimension and * \a p is the greater dimension, meaning that it is still of the same order of complexity as the faster bidiagonalizing R-SVD algorithms. * In particular, like any R-SVD, it takes advantage of non-squareness in that its complexity is only linear in the greater dimension. * * If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to * terminate in finite (and reasonable) time. * * The possible values for QRPreconditioner are: * \li ColPivHouseholderQRPreconditioner is the default. In practice it's very safe. It uses column-pivoting QR. * \li FullPivHouseholderQRPreconditioner, is the safest and slowest. It uses full-pivoting QR. * Contrary to other QRs, it doesn't allow computing thin unitaries. * \li HouseholderQRPreconditioner is the fastest, and less safe and accurate than the pivoting variants. It uses non-pivoting QR. * This is very similar in safety and accuracy to the bidiagonalization process used by bidiagonalizing SVD algorithms (since bidiagonalization * is inherently non-pivoting). 
However the resulting SVD is still more reliable than bidiagonalizing SVDs because the Jacobi-based iterarive * process is more reliable than the optimized bidiagonal SVD iterations. * \li NoQRPreconditioner allows not to use a QR preconditioner at all. This is useful if you know that you will only be computing * JacobiSVD decompositions of square matrices. Non-square matrices require a QR preconditioner. Using this option will result in * faster compilation and smaller executable code. It won't significantly speed up computation, since JacobiSVD is always checking * if QR preconditioning is needed before applying it anyway. * * \sa MatrixBase::jacobiSvd() */ template class JacobiSVD : public SVDBase<_MatrixType> { public: typedef _MatrixType MatrixType; typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef typename MatrixType::Index Index; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime), MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime), MatrixOptions = MatrixType::Options }; typedef Matrix MatrixUType; typedef Matrix MatrixVType; typedef typename internal::plain_diag_type::type SingularValuesType; typedef typename internal::plain_row_type::type RowType; typedef typename internal::plain_col_type::type ColType; typedef Matrix WorkMatrixType; /** \brief Default Constructor. * * The default constructor is useful in cases in which the user intends to * perform decompositions via JacobiSVD::compute(const MatrixType&). */ JacobiSVD() : SVDBase<_MatrixType>::SVDBase() {} /** \brief Default Constructor with memory preallocation * * Like the default constructor but with preallocation of the internal data * according to the specified problem size. * \sa JacobiSVD() */ JacobiSVD(Index rows, Index cols, unsigned int computationOptions = 0) : SVDBase<_MatrixType>::SVDBase() { allocate(rows, cols, computationOptions); } /** \brief Constructor performing the decomposition of given matrix. * * \param matrix the matrix to decompose * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed. * By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU, * #ComputeFullV, #ComputeThinV. * * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not * available with the (non-default) FullPivHouseholderQR preconditioner. */ JacobiSVD(const MatrixType& matrix, unsigned int computationOptions = 0) : SVDBase<_MatrixType>::SVDBase() { compute(matrix, computationOptions); } /** \brief Method performing the decomposition of given matrix using custom options. * * \param matrix the matrix to decompose * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed. * By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU, * #ComputeFullV, #ComputeThinV. * * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not * available with the (non-default) FullPivHouseholderQR preconditioner. 
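 *
 * A minimal sketch, assuming a dynamic-size matrix and the standard computation flags:
 * \code
 * Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 4);
 * Eigen::JacobiSVD<Eigen::MatrixXd> svd;
 * svd.compute(A, Eigen::ComputeThinU | Eigen::ComputeThinV);  // thin U is 6x4, thin V is 4x4
 * Eigen::VectorXd s = svd.singularValues();                   // sorted in decreasing order
 * \endcode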
*/ SVDBase& compute(const MatrixType& matrix, unsigned int computationOptions); /** \brief Method performing the decomposition of given matrix using current options. * * \param matrix the matrix to decompose * * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int). */ SVDBase& compute(const MatrixType& matrix) { return compute(matrix, this->m_computationOptions); } /** \returns a (least squares) solution of \f$ A x = b \f$ using the current SVD decomposition of A. * * \param b the right-hand-side of the equation to solve. * * \note Solving requires both U and V to be computed. Thin U and V are enough, there is no need for full U or V. * * \note SVD solving is implicitly least-squares. Thus, this method serves both purposes of exact solving and least-squares solving. * In other words, the returned solution is guaranteed to minimize the Euclidean norm \f$ \Vert A x - b \Vert \f$. */ template inline const internal::solve_retval solve(const MatrixBase& b) const { eigen_assert(this->m_isInitialized && "JacobiSVD is not initialized."); eigen_assert(SVDBase::computeU() && SVDBase::computeV() && "JacobiSVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice)."); return internal::solve_retval(*this, b.derived()); } private: void allocate(Index rows, Index cols, unsigned int computationOptions); protected: WorkMatrixType m_workMatrix; template friend struct internal::svd_precondition_2x2_block_to_be_real; template friend struct internal::qr_preconditioner_impl; internal::qr_preconditioner_impl m_qr_precond_morecols; internal::qr_preconditioner_impl m_qr_precond_morerows; }; template void JacobiSVD::allocate(Index rows, Index cols, unsigned int computationOptions) { if (SVDBase::allocate(rows, cols, computationOptions)) return; if (QRPreconditioner == FullPivHouseholderQRPreconditioner) { eigen_assert(!(this->m_computeThinU || this->m_computeThinV) && "JacobiSVD: can't compute thin U or thin V with the FullPivHouseholderQR preconditioner. " "Use the ColPivHouseholderQR preconditioner instead."); } m_workMatrix.resize(this->m_diagSize, this->m_diagSize); if(this->m_cols>this->m_rows) m_qr_precond_morecols.allocate(*this); if(this->m_rows>this->m_cols) m_qr_precond_morerows.allocate(*this); } template SVDBase& JacobiSVD::compute(const MatrixType& matrix, unsigned int computationOptions) { using std::abs; allocate(matrix.rows(), matrix.cols(), computationOptions); // currently we stop when we reach precision 2*epsilon as the last bit of precision can require an unreasonable number of iterations, // only worsening the precision of U and V as we accumulate more rotations const RealScalar precision = RealScalar(2) * NumTraits::epsilon(); // limit for very small denormal numbers to be considered zero in order to avoid infinite loops (see bug 286) const RealScalar considerAsZero = RealScalar(2) * std::numeric_limits::denorm_min(); /*** step 1. 
The R-SVD step: we use a QR decomposition to reduce to the case of a square matrix */ if(!m_qr_precond_morecols.run(*this, matrix) && !m_qr_precond_morerows.run(*this, matrix)) { m_workMatrix = matrix.block(0,0,this->m_diagSize,this->m_diagSize); if(this->m_computeFullU) this->m_matrixU.setIdentity(this->m_rows,this->m_rows); if(this->m_computeThinU) this->m_matrixU.setIdentity(this->m_rows,this->m_diagSize); if(this->m_computeFullV) this->m_matrixV.setIdentity(this->m_cols,this->m_cols); if(this->m_computeThinV) this->m_matrixV.setIdentity(this->m_cols, this->m_diagSize); } /*** step 2. The main Jacobi SVD iteration. ***/ bool finished = false; while(!finished) { finished = true; // do a sweep: for all index pairs (p,q), perform SVD of the corresponding 2x2 sub-matrix for(Index p = 1; p < this->m_diagSize; ++p) { for(Index q = 0; q < p; ++q) { // if this 2x2 sub-matrix is not diagonal already... // notice that this comparison will evaluate to false if any NaN is involved, ensuring that NaN's don't // keep us iterating forever. Similarly, small denormal numbers are considered zero. using std::max; RealScalar threshold = (max)(considerAsZero, precision * (max)(abs(m_workMatrix.coeff(p,p)), abs(m_workMatrix.coeff(q,q)))); if((max)(abs(m_workMatrix.coeff(p,q)),abs(m_workMatrix.coeff(q,p))) > threshold) { finished = false; // perform SVD decomposition of 2x2 sub-matrix corresponding to indices p,q to make it diagonal internal::svd_precondition_2x2_block_to_be_real::run(m_workMatrix, *this, p, q); JacobiRotation j_left, j_right; internal::real_2x2_jacobi_svd(m_workMatrix, p, q, &j_left, &j_right); // accumulate resulting Jacobi rotations m_workMatrix.applyOnTheLeft(p,q,j_left); if(SVDBase::computeU()) this->m_matrixU.applyOnTheRight(p,q,j_left.transpose()); m_workMatrix.applyOnTheRight(p,q,j_right); if(SVDBase::computeV()) this->m_matrixV.applyOnTheRight(p,q,j_right); } } } } /*** step 3. The work matrix is now diagonal, so ensure it's positive so its diagonal entries are the singular values ***/ for(Index i = 0; i < this->m_diagSize; ++i) { RealScalar a = abs(m_workMatrix.coeff(i,i)); this->m_singularValues.coeffRef(i) = a; if(SVDBase::computeU() && (a!=RealScalar(0))) this->m_matrixU.col(i) *= this->m_workMatrix.coeff(i,i)/a; } /*** step 4. 
Sort singular values in descending order and compute the number of nonzero singular values ***/ this->m_nonzeroSingularValues = this->m_diagSize; for(Index i = 0; i < this->m_diagSize; i++) { Index pos; RealScalar maxRemainingSingularValue = this->m_singularValues.tail(this->m_diagSize-i).maxCoeff(&pos); if(maxRemainingSingularValue == RealScalar(0)) { this->m_nonzeroSingularValues = i; break; } if(pos) { pos += i; std::swap(this->m_singularValues.coeffRef(i), this->m_singularValues.coeffRef(pos)); if(SVDBase::computeU()) this->m_matrixU.col(pos).swap(this->m_matrixU.col(i)); if(SVDBase::computeV()) this->m_matrixV.col(pos).swap(this->m_matrixV.col(i)); } } this->m_isInitialized = true; return *this; } namespace internal { template struct solve_retval, Rhs> : solve_retval_base, Rhs> { typedef JacobiSVD<_MatrixType, QRPreconditioner> JacobiSVDType; EIGEN_MAKE_SOLVE_HELPERS(JacobiSVDType,Rhs) template void evalTo(Dest& dst) const { eigen_assert(rhs().rows() == dec().rows()); // A = U S V^* // So A^{-1} = V S^{-1} U^* Index diagSize = (std::min)(dec().rows(), dec().cols()); typename JacobiSVDType::SingularValuesType invertedSingVals(diagSize); Index nonzeroSingVals = dec().nonzeroSingularValues(); invertedSingVals.head(nonzeroSingVals) = dec().singularValues().head(nonzeroSingVals).array().inverse(); invertedSingVals.tail(diagSize - nonzeroSingVals).setZero(); dst = dec().matrixV().leftCols(diagSize) * invertedSingVals.asDiagonal() * dec().matrixU().leftCols(diagSize).adjoint() * rhs(); } }; } // end namespace internal /** \svd_module * * \return the singular value decomposition of \c *this computed by two-sided * Jacobi transformations. * * \sa class JacobiSVD */ template JacobiSVD::PlainObject> MatrixBase::jacobiSvd(unsigned int computationOptions) const { return JacobiSVD(*this, computationOptions); } } // end namespace Eigen #endif // EIGEN_JACOBISVD_H RcppEigen/inst/include/unsupported/Eigen/src/SVD/SVDBase.h0000644000176200001440000002205412253717461023003 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009-2010 Benoit Jacob // // Copyright (C) 2013 Gauthier Brun // Copyright (C) 2013 Nicolas Carre // Copyright (C) 2013 Jean Ceccato // Copyright (C) 2013 Pierre Zoppitelli // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SVD_H #define EIGEN_SVD_H namespace Eigen { /** \ingroup SVD_Module * * * \class SVDBase * * \brief Mother class of SVD classes algorithms * * \param MatrixType the type of the matrix of which we are computing the SVD decomposition * SVD decomposition consists in decomposing any n-by-p matrix \a A as a product * \f[ A = U S V^* \f] * where \a U is a n-by-n unitary, \a V is a p-by-p unitary, and \a S is a n-by-p real positive matrix which is zero outside of its main diagonal; * the diagonal entries of S are known as the \em singular \em values of \a A and the columns of \a U and \a V are known as the left * and right \em singular \em vectors of \a A respectively. * * Singular values are always sorted in decreasing order. * * * You can ask for only \em thin \a U or \a V to be computed, meaning the following. In case of a rectangular n-by-p matrix, letting \a m be the * smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual * singular vectors. 
Asking for \em thin \a U or \a V means asking for only their \a m first columns to be formed. So \a U is then a n-by-m matrix, * and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving. * * If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to * terminate in finite (and reasonable) time. * \sa MatrixBase::genericSvd() */ template class SVDBase { public: typedef _MatrixType MatrixType; typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef typename MatrixType::Index Index; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime), MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime), MatrixOptions = MatrixType::Options }; typedef Matrix MatrixUType; typedef Matrix MatrixVType; typedef typename internal::plain_diag_type::type SingularValuesType; typedef typename internal::plain_row_type::type RowType; typedef typename internal::plain_col_type::type ColType; typedef Matrix WorkMatrixType; /** \brief Method performing the decomposition of given matrix using custom options. * * \param matrix the matrix to decompose * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed. * By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU, * #ComputeFullV, #ComputeThinV. * * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not * available with the (non-default) FullPivHouseholderQR preconditioner. */ SVDBase& compute(const MatrixType& matrix, unsigned int computationOptions); /** \brief Method performing the decomposition of given matrix using current options. * * \param matrix the matrix to decompose * * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int). */ //virtual SVDBase& compute(const MatrixType& matrix) = 0; SVDBase& compute(const MatrixType& matrix); /** \returns the \a U matrix. * * For the SVDBase decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p, * the U matrix is n-by-n if you asked for #ComputeFullU, and is n-by-m if you asked for #ComputeThinU. * * The \a m first columns of \a U are the left singular vectors of the matrix being decomposed. * * This method asserts that you asked for \a U to be computed. */ const MatrixUType& matrixU() const { eigen_assert(m_isInitialized && "SVD is not initialized."); eigen_assert(computeU() && "This SVD decomposition didn't compute U. Did you ask for it?"); return m_matrixU; } /** \returns the \a V matrix. * * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p, * the V matrix is p-by-p if you asked for #ComputeFullV, and is p-by-m if you asked for ComputeThinV. * * The \a m first columns of \a V are the right singular vectors of the matrix being decomposed. * * This method asserts that you asked for \a V to be computed. 
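 *
 * A small sketch, assuming a JacobiSVD decomposition built on this base class:
 * \code
 * Eigen::MatrixXf A = Eigen::MatrixXf::Random(5, 3);
 * Eigen::JacobiSVD<Eigen::MatrixXf> svd(A, Eigen::ComputeFullV);
 * Eigen::MatrixXf V = svd.matrixV();   // 3-by-3 here, since p = 3 and full V was requested
 * \endcode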
*/ const MatrixVType& matrixV() const { eigen_assert(m_isInitialized && "SVD is not initialized."); eigen_assert(computeV() && "This SVD decomposition didn't compute V. Did you ask for it?"); return m_matrixV; } /** \returns the vector of singular values. * * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p, the * returned vector has size \a m. Singular values are always sorted in decreasing order. */ const SingularValuesType& singularValues() const { eigen_assert(m_isInitialized && "SVD is not initialized."); return m_singularValues; } /** \returns the number of singular values that are not exactly 0 */ Index nonzeroSingularValues() const { eigen_assert(m_isInitialized && "SVD is not initialized."); return m_nonzeroSingularValues; } /** \returns true if \a U (full or thin) is asked for in this SVD decomposition */ inline bool computeU() const { return m_computeFullU || m_computeThinU; } /** \returns true if \a V (full or thin) is asked for in this SVD decomposition */ inline bool computeV() const { return m_computeFullV || m_computeThinV; } inline Index rows() const { return m_rows; } inline Index cols() const { return m_cols; } protected: // return true if already allocated bool allocate(Index rows, Index cols, unsigned int computationOptions) ; MatrixUType m_matrixU; MatrixVType m_matrixV; SingularValuesType m_singularValues; bool m_isInitialized, m_isAllocated; bool m_computeFullU, m_computeThinU; bool m_computeFullV, m_computeThinV; unsigned int m_computationOptions; Index m_nonzeroSingularValues, m_rows, m_cols, m_diagSize; /** \brief Default Constructor. * * Default constructor of SVDBase */ SVDBase() : m_isInitialized(false), m_isAllocated(false), m_computationOptions(0), m_rows(-1), m_cols(-1) {} }; template bool SVDBase::allocate(Index rows, Index cols, unsigned int computationOptions) { eigen_assert(rows >= 0 && cols >= 0); if (m_isAllocated && rows == m_rows && cols == m_cols && computationOptions == m_computationOptions) { return true; } m_rows = rows; m_cols = cols; m_isInitialized = false; m_isAllocated = true; m_computationOptions = computationOptions; m_computeFullU = (computationOptions & ComputeFullU) != 0; m_computeThinU = (computationOptions & ComputeThinU) != 0; m_computeFullV = (computationOptions & ComputeFullV) != 0; m_computeThinV = (computationOptions & ComputeThinV) != 0; eigen_assert(!(m_computeFullU && m_computeThinU) && "SVDBase: you can't ask for both full and thin U"); eigen_assert(!(m_computeFullV && m_computeThinV) && "SVDBase: you can't ask for both full and thin V"); eigen_assert(EIGEN_IMPLIES(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) && "SVDBase: thin U and V are only available when your matrix has a dynamic number of columns."); m_diagSize = (std::min)(m_rows, m_cols); m_singularValues.resize(m_diagSize); if(RowsAtCompileTime==Dynamic) m_matrixU.resize(m_rows, m_computeFullU ? m_rows : m_computeThinU ? m_diagSize : 0); if(ColsAtCompileTime==Dynamic) m_matrixV.resize(m_cols, m_computeFullV ? m_cols : m_computeThinV ? m_diagSize : 0); return false; } }// end namespace #endif // EIGEN_SVD_H RcppEigen/inst/include/unsupported/Eigen/src/SVD/BDCSVD.h0000644000176200001440000006517612253717461022535 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. 
// // We used the "A Divide-And-Conquer Algorithm for the Bidiagonal SVD" // research report written by Ming Gu and Stanley C.Eisenstat // The code variable names correspond to the names they used in their // report // // Copyright (C) 2013 Gauthier Brun // Copyright (C) 2013 Nicolas Carre // Copyright (C) 2013 Jean Ceccato // Copyright (C) 2013 Pierre Zoppitelli // // Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_BDCSVD_H #define EIGEN_BDCSVD_H #define EPSILON 0.0000000000000001 #define ALGOSWAP 32 namespace Eigen { /** \ingroup SVD_Module * * * \class BDCSVD * * \brief class Bidiagonal Divide and Conquer SVD * * \param MatrixType the type of the matrix of which we are computing the SVD decomposition * We plan to have a very similar interface to JacobiSVD on this class. * It should be used to speed up the calcul of SVD for big matrices. */ template class BDCSVD : public SVDBase<_MatrixType> { typedef SVDBase<_MatrixType> Base; public: using Base::rows; using Base::cols; typedef _MatrixType MatrixType; typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef typename MatrixType::Index Index; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime, ColsAtCompileTime), MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime, MaxColsAtCompileTime), MatrixOptions = MatrixType::Options }; typedef Matrix MatrixUType; typedef Matrix MatrixVType; typedef typename internal::plain_diag_type::type SingularValuesType; typedef typename internal::plain_row_type::type RowType; typedef typename internal::plain_col_type::type ColType; typedef Matrix MatrixX; typedef Matrix MatrixXr; typedef Matrix VectorType; /** \brief Default Constructor. * * The default constructor is useful in cases in which the user intends to * perform decompositions via BDCSVD::compute(const MatrixType&). */ BDCSVD() : SVDBase<_MatrixType>::SVDBase(), algoswap(ALGOSWAP) {} /** \brief Default Constructor with memory preallocation * * Like the default constructor but with preallocation of the internal data * according to the specified problem size. * \sa BDCSVD() */ BDCSVD(Index rows, Index cols, unsigned int computationOptions = 0) : SVDBase<_MatrixType>::SVDBase(), algoswap(ALGOSWAP) { allocate(rows, cols, computationOptions); } /** \brief Constructor performing the decomposition of given matrix. * * \param matrix the matrix to decompose * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed. * By default, none is computed. This is a bit - field, the possible bits are #ComputeFullU, #ComputeThinU, * #ComputeFullV, #ComputeThinV. * * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not * available with the (non - default) FullPivHouseholderQR preconditioner. */ BDCSVD(const MatrixType& matrix, unsigned int computationOptions = 0) : SVDBase<_MatrixType>::SVDBase(), algoswap(ALGOSWAP) { compute(matrix, computationOptions); } ~BDCSVD() { } /** \brief Method performing the decomposition of given matrix using custom options. 
* * \param matrix the matrix to decompose * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed. * By default, none is computed. This is a bit - field, the possible bits are #ComputeFullU, #ComputeThinU, * #ComputeFullV, #ComputeThinV. * * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not * available with the (non - default) FullPivHouseholderQR preconditioner. */ SVDBase& compute(const MatrixType& matrix, unsigned int computationOptions); /** \brief Method performing the decomposition of given matrix using current options. * * \param matrix the matrix to decompose * * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int). */ SVDBase& compute(const MatrixType& matrix) { return compute(matrix, this->m_computationOptions); } void setSwitchSize(int s) { eigen_assert(s>3 && "BDCSVD the size of the algo switch has to be greater than 4"); algoswap = s; } /** \returns a (least squares) solution of \f$ A x = b \f$ using the current SVD decomposition of A. * * \param b the right - hand - side of the equation to solve. * * \note Solving requires both U and V to be computed. Thin U and V are enough, there is no need for full U or V. * * \note SVD solving is implicitly least - squares. Thus, this method serves both purposes of exact solving and least - squares solving. * In other words, the returned solution is guaranteed to minimize the Euclidean norm \f$ \Vert A x - b \Vert \f$. */ template inline const internal::solve_retval solve(const MatrixBase& b) const { eigen_assert(this->m_isInitialized && "BDCSVD is not initialized."); eigen_assert(SVDBase<_MatrixType>::computeU() && SVDBase<_MatrixType>::computeV() && "BDCSVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice)."); return internal::solve_retval(*this, b.derived()); } const MatrixUType& matrixU() const { eigen_assert(this->m_isInitialized && "SVD is not initialized."); if (isTranspose){ eigen_assert(this->computeV() && "This SVD decomposition didn't compute U. Did you ask for it?"); return this->m_matrixV; } else { eigen_assert(this->computeU() && "This SVD decomposition didn't compute U. Did you ask for it?"); return this->m_matrixU; } } const MatrixVType& matrixV() const { eigen_assert(this->m_isInitialized && "SVD is not initialized."); if (isTranspose){ eigen_assert(this->computeU() && "This SVD decomposition didn't compute V. Did you ask for it?"); return this->m_matrixU; } else { eigen_assert(this->computeV() && "This SVD decomposition didn't compute V. 
Did you ask for it?"); return this->m_matrixV; } } private: void allocate(Index rows, Index cols, unsigned int computationOptions); void divide (Index firstCol, Index lastCol, Index firstRowW, Index firstColW, Index shift); void deflation43(Index firstCol, Index shift, Index i, Index size); void deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size); void deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift); void copyUV(MatrixXr naiveU, MatrixXr naiveV, MatrixX householderU, MatrixX houseHolderV); protected: MatrixXr m_naiveU, m_naiveV; MatrixXr m_computed; Index nRec; int algoswap; bool isTranspose, compU, compV; }; //end class BDCSVD // Methode to allocate ans initialize matrix and attributs template void BDCSVD::allocate(Index rows, Index cols, unsigned int computationOptions) { isTranspose = (cols > rows); if (SVDBase::allocate(rows, cols, computationOptions)) return; m_computed = MatrixXr::Zero(this->m_diagSize + 1, this->m_diagSize ); if (isTranspose){ compU = this->computeU(); compV = this->computeV(); } else { compV = this->computeU(); compU = this->computeV(); } if (compU) m_naiveU = MatrixXr::Zero(this->m_diagSize + 1, this->m_diagSize + 1 ); else m_naiveU = MatrixXr::Zero(2, this->m_diagSize + 1 ); if (compV) m_naiveV = MatrixXr::Zero(this->m_diagSize, this->m_diagSize); //should be changed for a cleaner implementation if (isTranspose){ bool aux; if (this->computeU()||this->computeV()){ aux = this->m_computeFullU; this->m_computeFullU = this->m_computeFullV; this->m_computeFullV = aux; aux = this->m_computeThinU; this->m_computeThinU = this->m_computeThinV; this->m_computeThinV = aux; } } }// end allocate // Methode which compute the BDCSVD for the int template<> SVDBase >& BDCSVD >::compute(const MatrixType& matrix, unsigned int computationOptions) { allocate(matrix.rows(), matrix.cols(), computationOptions); this->m_nonzeroSingularValues = 0; m_computed = Matrix::Zero(rows(), cols()); for (int i=0; im_diagSize; i++) { this->m_singularValues.coeffRef(i) = 0; } if (this->m_computeFullU) this->m_matrixU = Matrix::Zero(rows(), rows()); if (this->m_computeFullV) this->m_matrixV = Matrix::Zero(cols(), cols()); this->m_isInitialized = true; return *this; } // Methode which compute the BDCSVD template SVDBase& BDCSVD::compute(const MatrixType& matrix, unsigned int computationOptions) { allocate(matrix.rows(), matrix.cols(), computationOptions); using std::abs; //**** step 1 Bidiagonalization isTranspose = (matrix.cols()>matrix.rows()) ; MatrixType copy; if (isTranspose) copy = matrix.adjoint(); else copy = matrix; internal::UpperBidiagonalization bid(copy); //**** step 2 Divide // this is ugly and has to be redone (care of complex cast) MatrixXr temp; temp = bid.bidiagonal().toDenseMatrix().transpose(); m_computed.setZero(); for (int i=0; im_diagSize - 1; i++) { m_computed(i, i) = temp(i, i); m_computed(i + 1, i) = temp(i + 1, i); } m_computed(this->m_diagSize - 1, this->m_diagSize - 1) = temp(this->m_diagSize - 1, this->m_diagSize - 1); divide(0, this->m_diagSize - 1, 0, 0, 0); //**** step 3 copy for (int i=0; im_diagSize; i++) { RealScalar a = abs(m_computed.coeff(i, i)); this->m_singularValues.coeffRef(i) = a; if (a == 0){ this->m_nonzeroSingularValues = i; break; } else if (i == this->m_diagSize - 1) { this->m_nonzeroSingularValues = i + 1; break; } } copyUV(m_naiveV, m_naiveU, bid.householderU(), bid.householderV()); this->m_isInitialized = true; return *this; }// end compute 
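// A minimal usage sketch, assuming BDCSVD mirrors the JacobiSVD interface as the
// class documentation above states; the module header named in the include is an
// assumption (BDCSVD.h ships in the unsupported SVD module of this tree).
//
// \code
// #include <unsupported/Eigen/SVD>                // assumed module header
//
// Eigen::MatrixXd A = Eigen::MatrixXd::Random(1000, 50);
// Eigen::VectorXd b = Eigen::VectorXd::Random(1000);
//
// // Thin U and V suffice for solve(), but both must be requested.
// Eigen::BDCSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
// Eigen::VectorXd x = svd.solve(b);               // minimizes ||A x - b||
// std::cout << svd.singularValues().transpose() << std::endl;
// \endcode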
template void BDCSVD::copyUV(MatrixXr naiveU, MatrixXr naiveV, MatrixX householderU, MatrixX householderV){ if (this->computeU()){ MatrixX temp = MatrixX::Zero(naiveU.rows(), naiveU.cols()); temp.real() = naiveU; if (this->m_computeThinU){ this->m_matrixU = MatrixX::Identity(householderU.cols(), this->m_nonzeroSingularValues ); this->m_matrixU.block(0, 0, this->m_diagSize, this->m_nonzeroSingularValues) = temp.block(0, 0, this->m_diagSize, this->m_nonzeroSingularValues); this->m_matrixU = householderU * this->m_matrixU ; } else { this->m_matrixU = MatrixX::Identity(householderU.cols(), householderU.cols()); this->m_matrixU.block(0, 0, this->m_diagSize, this->m_diagSize) = temp.block(0, 0, this->m_diagSize, this->m_diagSize); this->m_matrixU = householderU * this->m_matrixU ; } } if (this->computeV()){ MatrixX temp = MatrixX::Zero(naiveV.rows(), naiveV.cols()); temp.real() = naiveV; if (this->m_computeThinV){ this->m_matrixV = MatrixX::Identity(householderV.cols(),this->m_nonzeroSingularValues ); this->m_matrixV.block(0, 0, this->m_nonzeroSingularValues, this->m_nonzeroSingularValues) = temp.block(0, 0, this->m_nonzeroSingularValues, this->m_nonzeroSingularValues); this->m_matrixV = householderV * this->m_matrixV ; } else { this->m_matrixV = MatrixX::Identity(householderV.cols(), householderV.cols()); this->m_matrixV.block(0, 0, this->m_diagSize, this->m_diagSize) = temp.block(0, 0, this->m_diagSize, this->m_diagSize); this->m_matrixV = householderV * this->m_matrixV; } } } // The divide algorithm is done "in place", we are always working on subsets of the same matrix. The divide methods takes as argument the // place of the submatrix we are currently working on. //@param firstCol : The Index of the first column of the submatrix of m_computed and for m_naiveU; //@param lastCol : The Index of the last column of the submatrix of m_computed and for m_naiveU; // lastCol + 1 - firstCol is the size of the submatrix. //@param firstRowW : The Index of the first row of the matrix W that we are to change. (see the reference paper section 1 for more information on W) //@param firstRowW : Same as firstRowW with the column. //@param shift : Each time one takes the left submatrix, one must add 1 to the shift. Why? Because! We actually want the last column of the U submatrix // to become the first column (*coeff) and to shift all the other columns to the right. There are more details on the reference paper. template void BDCSVD::divide (Index firstCol, Index lastCol, Index firstRowW, Index firstColW, Index shift) { // requires nbRows = nbCols + 1; using std::pow; using std::sqrt; using std::abs; const Index n = lastCol - firstCol + 1; const Index k = n/2; RealScalar alphaK; RealScalar betaK; RealScalar r0; RealScalar lambda, phi, c0, s0; MatrixXr l, f; // We use the other algorithm which is more efficient for small // matrices. 
if (n < algoswap){ JacobiSVD b(m_computed.block(firstCol, firstCol, n + 1, n), ComputeFullU | (ComputeFullV * compV)) ; if (compU) m_naiveU.block(firstCol, firstCol, n + 1, n + 1).real() << b.matrixU(); else { m_naiveU.row(0).segment(firstCol, n + 1).real() << b.matrixU().row(0); m_naiveU.row(1).segment(firstCol, n + 1).real() << b.matrixU().row(n); } if (compV) m_naiveV.block(firstRowW, firstColW, n, n).real() << b.matrixV(); m_computed.block(firstCol + shift, firstCol + shift, n + 1, n).setZero(); for (int i=0; i= firstCol; i--) { m_naiveU.col(i + 1).segment(firstCol, k + 1) << m_naiveU.col(i).segment(firstCol, k + 1); } // we shift q1 at the left with a factor c0 m_naiveU.col(firstCol).segment( firstCol, k + 1) << (q1 * c0); // last column = q1 * - s0 m_naiveU.col(lastCol + 1).segment(firstCol, k + 1) << (q1 * ( - s0)); // first column = q2 * s0 m_naiveU.col(firstCol).segment(firstCol + k + 1, n - k) << m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *s0; // q2 *= c0 m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *= c0; } else { RealScalar q1 = (m_naiveU(0, firstCol + k)); // we shift Q1 to the right for (Index i = firstCol + k - 1; i >= firstCol; i--) { m_naiveU(0, i + 1) = m_naiveU(0, i); } // we shift q1 at the left with a factor c0 m_naiveU(0, firstCol) = (q1 * c0); // last column = q1 * - s0 m_naiveU(0, lastCol + 1) = (q1 * ( - s0)); // first column = q2 * s0 m_naiveU(1, firstCol) = m_naiveU(1, lastCol + 1) *s0; // q2 *= c0 m_naiveU(1, lastCol + 1) *= c0; m_naiveU.row(1).segment(firstCol + 1, k).setZero(); m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1).setZero(); } m_computed(firstCol + shift, firstCol + shift) = r0; m_computed.col(firstCol + shift).segment(firstCol + shift + 1, k) << alphaK * l.transpose().real(); m_computed.col(firstCol + shift).segment(firstCol + shift + k + 1, n - k - 1) << betaK * f.transpose().real(); // the line below do the deflation of the matrix for the third part of the algorithm // Here the deflation is commented because the third part of the algorithm is not implemented // the third part of the algorithm is a fast SVD on the matrix m_computed which works thanks to the deflation deflation(firstCol, lastCol, k, firstRowW, firstColW, shift); // Third part of the algorithm, since the real third part of the algorithm is not implemeted we use a JacobiSVD JacobiSVD res= JacobiSVD(m_computed.block(firstCol + shift, firstCol +shift, n + 1, n), ComputeFullU | (ComputeFullV * compV)) ; if (compU) m_naiveU.block(firstCol, firstCol, n + 1, n + 1) *= res.matrixU(); else m_naiveU.block(0, firstCol, 2, n + 1) *= res.matrixU(); if (compV) m_naiveV.block(firstRowW, firstColW, n, n) *= res.matrixV(); m_computed.block(firstCol + shift, firstCol + shift, n, n) << MatrixXr::Zero(n, n); for (int i=0; i= 1, di almost null and zi non null. 
// We use a rotation to zero out zi applied to the left of M template void BDCSVD::deflation43(Index firstCol, Index shift, Index i, Index size){ using std::abs; using std::sqrt; using std::pow; RealScalar c = m_computed(firstCol + shift, firstCol + shift); RealScalar s = m_computed(i, firstCol + shift); RealScalar r = sqrt(pow(abs(c), 2) + pow(abs(s), 2)); if (r == 0){ m_computed(i, i)=0; return; } c/=r; s/=r; m_computed(firstCol + shift, firstCol + shift) = r; m_computed(i, firstCol + shift) = 0; m_computed(i, i) = 0; if (compU){ m_naiveU.col(firstCol).segment(firstCol,size) = c * m_naiveU.col(firstCol).segment(firstCol, size) - s * m_naiveU.col(i).segment(firstCol, size) ; m_naiveU.col(i).segment(firstCol, size) = (c + s*s/c) * m_naiveU.col(i).segment(firstCol, size) + (s/c) * m_naiveU.col(firstCol).segment(firstCol,size); } }// end deflation 43 // page 13 // i,j >= 1, i != j and |di - dj| < epsilon * norm2(M) // We apply two rotations to have zj = 0; template void BDCSVD::deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size){ using std::abs; using std::sqrt; using std::conj; using std::pow; RealScalar c = m_computed(firstColm, firstColm + j - 1); RealScalar s = m_computed(firstColm, firstColm + i - 1); RealScalar r = sqrt(pow(abs(c), 2) + pow(abs(s), 2)); if (r==0){ m_computed(firstColm + i, firstColm + i) = m_computed(firstColm + j, firstColm + j); return; } c/=r; s/=r; m_computed(firstColm + i, firstColm) = r; m_computed(firstColm + i, firstColm + i) = m_computed(firstColm + j, firstColm + j); m_computed(firstColm + j, firstColm) = 0; if (compU){ m_naiveU.col(firstColu + i).segment(firstColu, size) = c * m_naiveU.col(firstColu + i).segment(firstColu, size) - s * m_naiveU.col(firstColu + j).segment(firstColu, size) ; m_naiveU.col(firstColu + j).segment(firstColu, size) = (c + s*s/c) * m_naiveU.col(firstColu + j).segment(firstColu, size) + (s/c) * m_naiveU.col(firstColu + i).segment(firstColu, size); } if (compV){ m_naiveV.col(firstColW + i).segment(firstRowW, size - 1) = c * m_naiveV.col(firstColW + i).segment(firstRowW, size - 1) + s * m_naiveV.col(firstColW + j).segment(firstRowW, size - 1) ; m_naiveV.col(firstColW + j).segment(firstRowW, size - 1) = (c + s*s/c) * m_naiveV.col(firstColW + j).segment(firstRowW, size - 1) - (s/c) * m_naiveV.col(firstColW + i).segment(firstRowW, size - 1); } }// end deflation 44 template void BDCSVD::deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift){ //condition 4.1 RealScalar EPS = EPSILON * (std::max(m_computed(firstCol + shift + 1, firstCol + shift + 1), m_computed(firstCol + k, firstCol + k))); const Index length = lastCol + 1 - firstCol; if (m_computed(firstCol + shift, firstCol + shift) < EPS){ m_computed(firstCol + shift, firstCol + shift) = EPS; } //condition 4.2 for (Index i=firstCol + shift + 1;i<=lastCol + shift;i++){ if (std::abs(m_computed(i, firstCol + shift)) < EPS){ m_computed(i, firstCol + shift) = 0; } } //condition 4.3 for (Index i=firstCol + shift + 1;i<=lastCol + shift; i++){ if (m_computed(i, i) < EPS){ deflation43(firstCol, shift, i, length); } } //condition 4.4 Index i=firstCol + shift + 1, j=firstCol + shift + k + 1; //we stock the final place of each line Index *permutation = new Index[length]; for (Index p =1; p < length; p++) { if (i> firstCol + shift + k){ permutation[p] = j; j++; } else if (j> lastCol + shift) { permutation[p] = i; i++; } else { if (m_computed(i, i) < m_computed(j, j)){ permutation[p] = j; j++; } else { 
permutation[p] = i; i++; } } } //we do the permutation RealScalar aux; //we stock the current index of each col //and the column of each index Index *realInd = new Index[length]; Index *realCol = new Index[length]; for (int pos = 0; pos< length; pos++){ realCol[pos] = pos + firstCol + shift; realInd[pos] = pos; } const Index Zero = firstCol + shift; VectorType temp; for (int i = 1; i < length - 1; i++){ const Index I = i + Zero; const Index realI = realInd[i]; const Index j = permutation[length - i] - Zero; const Index J = realCol[j]; //diag displace aux = m_computed(I, I); m_computed(I, I) = m_computed(J, J); m_computed(J, J) = aux; //firstrow displace aux = m_computed(I, Zero); m_computed(I, Zero) = m_computed(J, Zero); m_computed(J, Zero) = aux; // change columns if (compU) { temp = m_naiveU.col(I - shift).segment(firstCol, length + 1); m_naiveU.col(I - shift).segment(firstCol, length + 1) << m_naiveU.col(J - shift).segment(firstCol, length + 1); m_naiveU.col(J - shift).segment(firstCol, length + 1) << temp; } else { temp = m_naiveU.col(I - shift).segment(0, 2); m_naiveU.col(I - shift).segment(0, 2) << m_naiveU.col(J - shift).segment(0, 2); m_naiveU.col(J - shift).segment(0, 2) << temp; } if (compV) { const Index CWI = I + firstColW - Zero; const Index CWJ = J + firstColW - Zero; temp = m_naiveV.col(CWI).segment(firstRowW, length); m_naiveV.col(CWI).segment(firstRowW, length) << m_naiveV.col(CWJ).segment(firstRowW, length); m_naiveV.col(CWJ).segment(firstRowW, length) << temp; } //update real pos realCol[realI] = J; realCol[j] = I; realInd[J - Zero] = realI; realInd[I - Zero] = j; } for (Index i = firstCol + shift + 1; i struct solve_retval, Rhs> : solve_retval_base, Rhs> { typedef BDCSVD<_MatrixType> BDCSVDType; EIGEN_MAKE_SOLVE_HELPERS(BDCSVDType, Rhs) template void evalTo(Dest& dst) const { eigen_assert(rhs().rows() == dec().rows()); // A = U S V^* // So A^{ - 1} = V S^{ - 1} U^* Index diagSize = (std::min)(dec().rows(), dec().cols()); typename BDCSVDType::SingularValuesType invertedSingVals(diagSize); Index nonzeroSingVals = dec().nonzeroSingularValues(); invertedSingVals.head(nonzeroSingVals) = dec().singularValues().head(nonzeroSingVals).array().inverse(); invertedSingVals.tail(diagSize - nonzeroSingVals).setZero(); dst = dec().matrixV().leftCols(diagSize) * invertedSingVals.asDiagonal() * dec().matrixU().leftCols(diagSize).adjoint() * rhs(); return; } }; } //end namespace internal /** \svd_module * * \return the singular value decomposition of \c *this computed by * BDC Algorithm * * \sa class BDCSVD */ /* template BDCSVD::PlainObject> MatrixBase::bdcSvd(unsigned int computationOptions) const { return BDCSVD(*this, computationOptions); } */ } // end namespace Eigen #endif RcppEigen/inst/include/unsupported/Eigen/src/BVH/0000755000176200001440000000000012253717461021363 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/src/BVH/BVAlgorithms.h0000644000176200001440000003126012253717461024077 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Ilya Baran // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_BVALGORITHMS_H #define EIGEN_BVALGORITHMS_H namespace Eigen { namespace internal { #ifndef EIGEN_PARSED_BY_DOXYGEN template bool intersect_helper(const BVH &tree, Intersector &intersector, typename BVH::Index root) { typedef typename BVH::Index Index; typedef typename BVH::VolumeIterator VolIter; typedef typename BVH::ObjectIterator ObjIter; VolIter vBegin = VolIter(), vEnd = VolIter(); ObjIter oBegin = ObjIter(), oEnd = ObjIter(); std::vector todo(1, root); while(!todo.empty()) { tree.getChildren(todo.back(), vBegin, vEnd, oBegin, oEnd); todo.pop_back(); for(; vBegin != vEnd; ++vBegin) //go through child volumes if(intersector.intersectVolume(tree.getVolume(*vBegin))) todo.push_back(*vBegin); for(; oBegin != oEnd; ++oBegin) //go through child objects if(intersector.intersectObject(*oBegin)) return true; //intersector said to stop query } return false; } #endif //not EIGEN_PARSED_BY_DOXYGEN template struct intersector_helper1 { intersector_helper1(const Object2 &inStored, Intersector &in) : stored(inStored), intersector(in) {} bool intersectVolume(const Volume1 &vol) { return intersector.intersectVolumeObject(vol, stored); } bool intersectObject(const Object1 &obj) { return intersector.intersectObjectObject(obj, stored); } Object2 stored; Intersector &intersector; private: intersector_helper1& operator=(const intersector_helper1&); }; template struct intersector_helper2 { intersector_helper2(const Object1 &inStored, Intersector &in) : stored(inStored), intersector(in) {} bool intersectVolume(const Volume2 &vol) { return intersector.intersectObjectVolume(stored, vol); } bool intersectObject(const Object2 &obj) { return intersector.intersectObjectObject(stored, obj); } Object1 stored; Intersector &intersector; private: intersector_helper2& operator=(const intersector_helper2&); }; } // end namespace internal /** Given a BVH, runs the query encapsulated by \a intersector. * The Intersector type must provide the following members: \code bool intersectVolume(const BVH::Volume &volume) //returns true if volume intersects the query bool intersectObject(const BVH::Object &object) //returns true if the search should terminate immediately \endcode */ template void BVIntersect(const BVH &tree, Intersector &intersector) { internal::intersect_helper(tree, intersector, tree.getRootIndex()); } /** Given two BVH's, runs the query on their Cartesian product encapsulated by \a intersector. 
* The Intersector type must provide the following members: \code bool intersectVolumeVolume(const BVH1::Volume &v1, const BVH2::Volume &v2) //returns true if product of volumes intersects the query bool intersectVolumeObject(const BVH1::Volume &v1, const BVH2::Object &o2) //returns true if the volume-object product intersects the query bool intersectObjectVolume(const BVH1::Object &o1, const BVH2::Volume &v2) //returns true if the volume-object product intersects the query bool intersectObjectObject(const BVH1::Object &o1, const BVH2::Object &o2) //returns true if the search should terminate immediately \endcode */ template void BVIntersect(const BVH1 &tree1, const BVH2 &tree2, Intersector &intersector) //TODO: tandem descent when it makes sense { typedef typename BVH1::Index Index1; typedef typename BVH2::Index Index2; typedef internal::intersector_helper1 Helper1; typedef internal::intersector_helper2 Helper2; typedef typename BVH1::VolumeIterator VolIter1; typedef typename BVH1::ObjectIterator ObjIter1; typedef typename BVH2::VolumeIterator VolIter2; typedef typename BVH2::ObjectIterator ObjIter2; VolIter1 vBegin1 = VolIter1(), vEnd1 = VolIter1(); ObjIter1 oBegin1 = ObjIter1(), oEnd1 = ObjIter1(); VolIter2 vBegin2 = VolIter2(), vEnd2 = VolIter2(), vCur2 = VolIter2(); ObjIter2 oBegin2 = ObjIter2(), oEnd2 = ObjIter2(), oCur2 = ObjIter2(); std::vector > todo(1, std::make_pair(tree1.getRootIndex(), tree2.getRootIndex())); while(!todo.empty()) { tree1.getChildren(todo.back().first, vBegin1, vEnd1, oBegin1, oEnd1); tree2.getChildren(todo.back().second, vBegin2, vEnd2, oBegin2, oEnd2); todo.pop_back(); for(; vBegin1 != vEnd1; ++vBegin1) { //go through child volumes of first tree const typename BVH1::Volume &vol1 = tree1.getVolume(*vBegin1); for(vCur2 = vBegin2; vCur2 != vEnd2; ++vCur2) { //go through child volumes of second tree if(intersector.intersectVolumeVolume(vol1, tree2.getVolume(*vCur2))) todo.push_back(std::make_pair(*vBegin1, *vCur2)); } for(oCur2 = oBegin2; oCur2 != oEnd2; ++oCur2) {//go through child objects of second tree Helper1 helper(*oCur2, intersector); if(internal::intersect_helper(tree1, helper, *vBegin1)) return; //intersector said to stop query } } for(; oBegin1 != oEnd1; ++oBegin1) { //go through child objects of first tree for(vCur2 = vBegin2; vCur2 != vEnd2; ++vCur2) { //go through child volumes of second tree Helper2 helper(*oBegin1, intersector); if(internal::intersect_helper(tree2, helper, *vCur2)) return; //intersector said to stop query } for(oCur2 = oBegin2; oCur2 != oEnd2; ++oCur2) {//go through child objects of second tree if(intersector.intersectObjectObject(*oBegin1, *oCur2)) return; //intersector said to stop query } } } } namespace internal { #ifndef EIGEN_PARSED_BY_DOXYGEN template typename Minimizer::Scalar minimize_helper(const BVH &tree, Minimizer &minimizer, typename BVH::Index root, typename Minimizer::Scalar minimum) { typedef typename Minimizer::Scalar Scalar; typedef typename BVH::Index Index; typedef std::pair QueueElement; //first element is priority typedef typename BVH::VolumeIterator VolIter; typedef typename BVH::ObjectIterator ObjIter; VolIter vBegin = VolIter(), vEnd = VolIter(); ObjIter oBegin = ObjIter(), oEnd = ObjIter(); std::priority_queue, std::greater > todo; //smallest is at the top todo.push(std::make_pair(Scalar(), root)); while(!todo.empty()) { tree.getChildren(todo.top().second, vBegin, vEnd, oBegin, oEnd); todo.pop(); for(; oBegin != oEnd; ++oBegin) //go through child objects minimum = (std::min)(minimum, 
minimizer.minimumOnObject(*oBegin)); for(; vBegin != vEnd; ++vBegin) { //go through child volumes Scalar val = minimizer.minimumOnVolume(tree.getVolume(*vBegin)); if(val < minimum) todo.push(std::make_pair(val, *vBegin)); } } return minimum; } #endif //not EIGEN_PARSED_BY_DOXYGEN template struct minimizer_helper1 { typedef typename Minimizer::Scalar Scalar; minimizer_helper1(const Object2 &inStored, Minimizer &m) : stored(inStored), minimizer(m) {} Scalar minimumOnVolume(const Volume1 &vol) { return minimizer.minimumOnVolumeObject(vol, stored); } Scalar minimumOnObject(const Object1 &obj) { return minimizer.minimumOnObjectObject(obj, stored); } Object2 stored; Minimizer &minimizer; private: minimizer_helper1& operator=(const minimizer_helper1&); }; template struct minimizer_helper2 { typedef typename Minimizer::Scalar Scalar; minimizer_helper2(const Object1 &inStored, Minimizer &m) : stored(inStored), minimizer(m) {} Scalar minimumOnVolume(const Volume2 &vol) { return minimizer.minimumOnObjectVolume(stored, vol); } Scalar minimumOnObject(const Object2 &obj) { return minimizer.minimumOnObjectObject(stored, obj); } Object1 stored; Minimizer &minimizer; private: minimizer_helper2& operator=(const minimizer_helper2&); }; } // end namespace internal /** Given a BVH, runs the query encapsulated by \a minimizer. * \returns the minimum value. * The Minimizer type must provide the following members: \code typedef Scalar //the numeric type of what is being minimized--not necessarily the Scalar type of the BVH (if it has one) Scalar minimumOnVolume(const BVH::Volume &volume) Scalar minimumOnObject(const BVH::Object &object) \endcode */ template typename Minimizer::Scalar BVMinimize(const BVH &tree, Minimizer &minimizer) { return internal::minimize_helper(tree, minimizer, tree.getRootIndex(), (std::numeric_limits::max)()); } /** Given two BVH's, runs the query on their cartesian product encapsulated by \a minimizer. * \returns the minimum value. 
* The Minimizer type must provide the following members: \code typedef Scalar //the numeric type of what is being minimized--not necessarily the Scalar type of the BVH (if it has one) Scalar minimumOnVolumeVolume(const BVH1::Volume &v1, const BVH2::Volume &v2) Scalar minimumOnVolumeObject(const BVH1::Volume &v1, const BVH2::Object &o2) Scalar minimumOnObjectVolume(const BVH1::Object &o1, const BVH2::Volume &v2) Scalar minimumOnObjectObject(const BVH1::Object &o1, const BVH2::Object &o2) \endcode */ template typename Minimizer::Scalar BVMinimize(const BVH1 &tree1, const BVH2 &tree2, Minimizer &minimizer) { typedef typename Minimizer::Scalar Scalar; typedef typename BVH1::Index Index1; typedef typename BVH2::Index Index2; typedef internal::minimizer_helper1 Helper1; typedef internal::minimizer_helper2 Helper2; typedef std::pair > QueueElement; //first element is priority typedef typename BVH1::VolumeIterator VolIter1; typedef typename BVH1::ObjectIterator ObjIter1; typedef typename BVH2::VolumeIterator VolIter2; typedef typename BVH2::ObjectIterator ObjIter2; VolIter1 vBegin1 = VolIter1(), vEnd1 = VolIter1(); ObjIter1 oBegin1 = ObjIter1(), oEnd1 = ObjIter1(); VolIter2 vBegin2 = VolIter2(), vEnd2 = VolIter2(), vCur2 = VolIter2(); ObjIter2 oBegin2 = ObjIter2(), oEnd2 = ObjIter2(), oCur2 = ObjIter2(); std::priority_queue, std::greater > todo; //smallest is at the top Scalar minimum = (std::numeric_limits::max)(); todo.push(std::make_pair(Scalar(), std::make_pair(tree1.getRootIndex(), tree2.getRootIndex()))); while(!todo.empty()) { tree1.getChildren(todo.top().second.first, vBegin1, vEnd1, oBegin1, oEnd1); tree2.getChildren(todo.top().second.second, vBegin2, vEnd2, oBegin2, oEnd2); todo.pop(); for(; oBegin1 != oEnd1; ++oBegin1) { //go through child objects of first tree for(oCur2 = oBegin2; oCur2 != oEnd2; ++oCur2) {//go through child objects of second tree minimum = (std::min)(minimum, minimizer.minimumOnObjectObject(*oBegin1, *oCur2)); } for(vCur2 = vBegin2; vCur2 != vEnd2; ++vCur2) { //go through child volumes of second tree Helper2 helper(*oBegin1, minimizer); minimum = (std::min)(minimum, internal::minimize_helper(tree2, helper, *vCur2, minimum)); } } for(; vBegin1 != vEnd1; ++vBegin1) { //go through child volumes of first tree const typename BVH1::Volume &vol1 = tree1.getVolume(*vBegin1); for(oCur2 = oBegin2; oCur2 != oEnd2; ++oCur2) {//go through child objects of second tree Helper1 helper(*oCur2, minimizer); minimum = (std::min)(minimum, internal::minimize_helper(tree1, helper, *vBegin1, minimum)); } for(vCur2 = vBegin2; vCur2 != vEnd2; ++vCur2) { //go through child volumes of second tree Scalar val = minimizer.minimumOnVolumeVolume(vol1, tree2.getVolume(*vCur2)); if(val < minimum) todo.push(std::make_pair(val, std::make_pair(*vBegin1, *vCur2))); } } } return minimum; } } // end namespace Eigen #endif // EIGEN_BVALGORITHMS_H RcppEigen/inst/include/unsupported/Eigen/src/BVH/KdBVH.h0000644000176200001440000002164612253717461022443 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Ilya Baran // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
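// A minimal sketch of how the KdBVH defined below can back the BVMinimize query
// documented in BVAlgorithms.h above; the point objects, the bounding_box()
// helper and the PointDistance functor are illustrative assumptions, not part of
// the library.
//
// \code
// namespace Eigen {
//   // helper the tree initializer looks up; a single point's box is degenerate
//   AlignedBox2d bounding_box(const Vector2d &p) { return AlignedBox2d(p, p); }
// }
//
// // Minimizer providing the members required by the single-tree BVMinimize().
// struct PointDistance {
//   typedef double Scalar;
//   Eigen::Vector2d query;
//   double minimumOnVolume(const Eigen::AlignedBox2d &b) { return b.squaredExteriorDistance(query); }
//   double minimumOnObject(const Eigen::Vector2d &p)     { return (p - query).squaredNorm(); }
// };
//
// std::vector<Eigen::Vector2d, Eigen::aligned_allocator<Eigen::Vector2d> > pts;
// // ... fill pts ...
// Eigen::KdBVH<double, 2, Eigen::Vector2d> tree(pts.begin(), pts.end());
// PointDistance pd; pd.query = Eigen::Vector2d(0.5, 0.5);
// double d2 = Eigen::BVMinimize(tree, pd);  // squared distance to the closest point
// \endcode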
#ifndef KDBVH_H_INCLUDED #define KDBVH_H_INCLUDED namespace Eigen { namespace internal { //internal pair class for the BVH--used instead of std::pair because of alignment template struct vector_int_pair { EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(Scalar, Dim) typedef Matrix VectorType; vector_int_pair(const VectorType &v, int i) : first(v), second(i) {} VectorType first; int second; }; //these templates help the tree initializer get the bounding boxes either from a provided //iterator range or using bounding_box in a unified way template struct get_boxes_helper { void operator()(const ObjectList &objects, BoxIter boxBegin, BoxIter boxEnd, VolumeList &outBoxes) { outBoxes.insert(outBoxes.end(), boxBegin, boxEnd); eigen_assert(outBoxes.size() == objects.size()); } }; template struct get_boxes_helper { void operator()(const ObjectList &objects, int, int, VolumeList &outBoxes) { outBoxes.reserve(objects.size()); for(int i = 0; i < (int)objects.size(); ++i) outBoxes.push_back(bounding_box(objects[i])); } }; } // end namespace internal /** \class KdBVH * \brief A simple bounding volume hierarchy based on AlignedBox * * \param _Scalar The underlying scalar type of the bounding boxes * \param _Dim The dimension of the space in which the hierarchy lives * \param _Object The object type that lives in the hierarchy. It must have value semantics. Either bounding_box(_Object) must * be defined and return an AlignedBox<_Scalar, _Dim> or bounding boxes must be provided to the tree initializer. * * This class provides a simple (as opposed to optimized) implementation of a bounding volume hierarchy analogous to a Kd-tree. * Given a sequence of objects, it computes their bounding boxes, constructs a Kd-tree of their centers * and builds a BVH with the structure of that Kd-tree. When the elements of the tree are too expensive to be copied around, * it is useful for _Object to be a pointer. */ template class KdBVH { public: enum { Dim = _Dim }; typedef _Object Object; typedef std::vector > ObjectList; typedef _Scalar Scalar; typedef AlignedBox Volume; typedef std::vector > VolumeList; typedef int Index; typedef const int *VolumeIterator; //the iterators are just pointers into the tree's vectors typedef const Object *ObjectIterator; KdBVH() {} /** Given an iterator range over \a Object references, constructs the BVH. Requires that bounding_box(Object) return a Volume. */ template KdBVH(Iter begin, Iter end) { init(begin, end, 0, 0); } //int is recognized by init as not being an iterator type /** Given an iterator range over \a Object references and an iterator range over their bounding boxes, constructs the BVH */ template KdBVH(OIter begin, OIter end, BIter boxBegin, BIter boxEnd) { init(begin, end, boxBegin, boxEnd); } /** Given an iterator range over \a Object references, constructs the BVH, overwriting whatever is in there currently. * Requires that bounding_box(Object) return a Volume. */ template void init(Iter begin, Iter end) { init(begin, end, 0, 0); } /** Given an iterator range over \a Object references and an iterator range over their bounding boxes, * constructs the BVH, overwriting whatever is in there currently. 
*/ template void init(OIter begin, OIter end, BIter boxBegin, BIter boxEnd) { objects.clear(); boxes.clear(); children.clear(); objects.insert(objects.end(), begin, end); int n = static_cast(objects.size()); if(n < 2) return; //if we have at most one object, we don't need any internal nodes VolumeList objBoxes; VIPairList objCenters; //compute the bounding boxes depending on BIter type internal::get_boxes_helper()(objects, boxBegin, boxEnd, objBoxes); objCenters.reserve(n); boxes.reserve(n - 1); children.reserve(2 * n - 2); for(int i = 0; i < n; ++i) objCenters.push_back(VIPair(objBoxes[i].center(), i)); build(objCenters, 0, n, objBoxes, 0); //the recursive part of the algorithm ObjectList tmp(n); tmp.swap(objects); for(int i = 0; i < n; ++i) objects[i] = tmp[objCenters[i].second]; } /** \returns the index of the root of the hierarchy */ inline Index getRootIndex() const { return (int)boxes.size() - 1; } /** Given an \a index of a node, on exit, \a outVBegin and \a outVEnd range over the indices of the volume children of the node * and \a outOBegin and \a outOEnd range over the object children of the node */ EIGEN_STRONG_INLINE void getChildren(Index index, VolumeIterator &outVBegin, VolumeIterator &outVEnd, ObjectIterator &outOBegin, ObjectIterator &outOEnd) const { //inlining this function should open lots of optimization opportunities to the compiler if(index < 0) { outVBegin = outVEnd; if(!objects.empty()) outOBegin = &(objects[0]); outOEnd = outOBegin + objects.size(); //output all objects--necessary when the tree has only one object return; } int numBoxes = static_cast(boxes.size()); int idx = index * 2; if(children[idx + 1] < numBoxes) { //second index is always bigger outVBegin = &(children[idx]); outVEnd = outVBegin + 2; outOBegin = outOEnd; } else if(children[idx] >= numBoxes) { //if both children are objects outVBegin = outVEnd; outOBegin = &(objects[children[idx] - numBoxes]); outOEnd = outOBegin + 2; } else { //if the first child is a volume and the second is an object outVBegin = &(children[idx]); outVEnd = outVBegin + 1; outOBegin = &(objects[children[idx + 1] - numBoxes]); outOEnd = outOBegin + 1; } } /** \returns the bounding box of the node at \a index */ inline const Volume &getVolume(Index index) const { return boxes[index]; } private: typedef internal::vector_int_pair VIPair; typedef std::vector > VIPairList; typedef Matrix VectorType; struct VectorComparator //compares vectors, or, more specificall, VIPairs along a particular dimension { VectorComparator(int inDim) : dim(inDim) {} inline bool operator()(const VIPair &v1, const VIPair &v2) const { return v1.first[dim] < v2.first[dim]; } int dim; }; //Build the part of the tree between objects[from] and objects[to] (not including objects[to]). //This routine partitions the objCenters in [from, to) along the dimension dim, recursively constructs //the two halves, and adds their parent node. 
TODO: a cache-friendlier layout void build(VIPairList &objCenters, int from, int to, const VolumeList &objBoxes, int dim) { eigen_assert(to - from > 1); if(to - from == 2) { boxes.push_back(objBoxes[objCenters[from].second].merged(objBoxes[objCenters[from + 1].second])); children.push_back(from + (int)objects.size() - 1); //there are objects.size() - 1 tree nodes children.push_back(from + (int)objects.size()); } else if(to - from == 3) { int mid = from + 2; std::nth_element(objCenters.begin() + from, objCenters.begin() + mid, objCenters.begin() + to, VectorComparator(dim)); //partition build(objCenters, from, mid, objBoxes, (dim + 1) % Dim); int idx1 = (int)boxes.size() - 1; boxes.push_back(boxes[idx1].merged(objBoxes[objCenters[mid].second])); children.push_back(idx1); children.push_back(mid + (int)objects.size() - 1); } else { int mid = from + (to - from) / 2; nth_element(objCenters.begin() + from, objCenters.begin() + mid, objCenters.begin() + to, VectorComparator(dim)); //partition build(objCenters, from, mid, objBoxes, (dim + 1) % Dim); int idx1 = (int)boxes.size() - 1; build(objCenters, mid, to, objBoxes, (dim + 1) % Dim); int idx2 = (int)boxes.size() - 1; boxes.push_back(boxes[idx1].merged(boxes[idx2])); children.push_back(idx1); children.push_back(idx2); } } std::vector children; //children of x are children[2x] and children[2x+1], indices bigger than boxes.size() index into objects. VolumeList boxes; ObjectList objects; }; } // end namespace Eigen #endif //KDBVH_H_INCLUDED RcppEigen/inst/include/unsupported/Eigen/src/MoreVectorization/0000755000176200001440000000000012253717461024427 5ustar liggesusersRcppEigen/inst/include/unsupported/Eigen/src/MoreVectorization/MathFunctions.h0000644000176200001440000000573312253717461027372 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Rohit Garg // Copyright (C) 2009 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_MOREVECTORIZATION_MATHFUNCTIONS_H #define EIGEN_MOREVECTORIZATION_MATHFUNCTIONS_H namespace Eigen { namespace internal { /** \internal \returns the arcsin of \a a (coeff-wise) */ template inline static Packet pasin(Packet a) { return std::asin(a); } #ifdef EIGEN_VECTORIZE_SSE template<> EIGEN_DONT_INLINE Packet4f pasin(Packet4f x) { _EIGEN_DECLARE_CONST_Packet4f(half, 0.5); _EIGEN_DECLARE_CONST_Packet4f(minus_half, -0.5); _EIGEN_DECLARE_CONST_Packet4f(3half, 1.5); _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(sign_mask, 0x80000000); _EIGEN_DECLARE_CONST_Packet4f(pi, 3.141592654); _EIGEN_DECLARE_CONST_Packet4f(pi_over_2, 3.141592654*0.5); _EIGEN_DECLARE_CONST_Packet4f(asin1, 4.2163199048E-2); _EIGEN_DECLARE_CONST_Packet4f(asin2, 2.4181311049E-2); _EIGEN_DECLARE_CONST_Packet4f(asin3, 4.5470025998E-2); _EIGEN_DECLARE_CONST_Packet4f(asin4, 7.4953002686E-2); _EIGEN_DECLARE_CONST_Packet4f(asin5, 1.6666752422E-1); Packet4f a = pabs(x);//got the absolute value Packet4f sign_bit= _mm_and_ps(x, p4f_sign_mask);//extracted the sign bit Packet4f z1,z2;//will need them during computation //will compute the two branches for asin //so first compare with half Packet4f branch_mask= _mm_cmpgt_ps(a, p4f_half);//this is to select which branch to take //both will be taken, and finally results will be merged //the branch for values >0.5 { //the core series expansion z1=pmadd(p4f_minus_half,a,p4f_half); Packet4f x1=psqrt(z1); Packet4f s1=pmadd(p4f_asin1, z1, p4f_asin2); Packet4f s2=pmadd(s1, z1, p4f_asin3); Packet4f s3=pmadd(s2,z1, p4f_asin4); Packet4f s4=pmadd(s3,z1, p4f_asin5); Packet4f temp=pmul(s4,z1);//not really a madd but a mul by z so that the next term can be a madd z1=pmadd(temp,x1,x1); z1=padd(z1,z1); z1=psub(p4f_pi_over_2,z1); } { //the core series expansion Packet4f x2=a; z2=pmul(x2,x2); Packet4f s1=pmadd(p4f_asin1, z2, p4f_asin2); Packet4f s2=pmadd(s1, z2, p4f_asin3); Packet4f s3=pmadd(s2,z2, p4f_asin4); Packet4f s4=pmadd(s3,z2, p4f_asin5); Packet4f temp=pmul(s4,z2);//not really a madd but a mul by z so that the next term can be a madd z2=pmadd(temp,x2,x2); } /* select the correct result from the two branch evaluations */ z1 = _mm_and_ps(branch_mask, z1); z2 = _mm_andnot_ps(branch_mask, z2); Packet4f z = _mm_or_ps(z1,z2); /* update the sign */ return _mm_xor_ps(z, sign_bit); } #endif // EIGEN_VECTORIZE_SSE } // end namespace internal } // end namespace Eigen #endif // EIGEN_MOREVECTORIZATION_MATHFUNCTIONS_H RcppEigen/inst/include/unsupported/Eigen/KroneckerProduct0000644000176200001440000000157412253717461023373 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_KRONECKER_PRODUCT_MODULE_H #define EIGEN_KRONECKER_PRODUCT_MODULE_H #include "../../Eigen/Core" #include "../../Eigen/src/Core/util/DisableStupidWarnings.h" namespace Eigen { /** * \defgroup KroneckerProduct_Module KroneckerProduct module * * This module contains an experimental Kronecker product implementation. 
* * \code * #include * \endcode */ } // namespace Eigen #include "src/KroneckerProduct/KroneckerTensorProduct.h" #include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_KRONECKER_PRODUCT_MODULE_H RcppEigen/inst/include/unsupported/Eigen/MPRealSupport0000644000176200001440000001645212446006401022612 0ustar liggesusers// This file is part of a joint effort between Eigen, a lightweight C++ template library // for linear algebra, and MPFR C++, a C++ interface to MPFR library (http://www.holoborodko.com/pavel/) // // Copyright (C) 2010-2012 Pavel Holoborodko // Copyright (C) 2010 Konstantin Holoborodko // Copyright (C) 2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MPREALSUPPORT_MODULE_H #define EIGEN_MPREALSUPPORT_MODULE_H #include #include namespace Eigen { /** * \defgroup MPRealSupport_Module MPFRC++ Support module * \code * #include * \endcode * * This module provides support for multi precision floating point numbers * via the
MPFR C++ * library which itself is built upon MPFR/GMP. * * You can find a copy of MPFR C++ that is known to be compatible in the unsupported/test/mpreal folder. * * Here is an example: * \code #include #include #include using namespace mpfr; using namespace Eigen; int main() { // set precision to 256 bits (double has only 53 bits) mpreal::set_default_prec(256); // Declare matrix and vector types with multi-precision scalar type typedef Matrix MatrixXmp; typedef Matrix VectorXmp; MatrixXmp A = MatrixXmp::Random(100,100); VectorXmp b = VectorXmp::Random(100); // Solve Ax=b using LU VectorXmp x = A.lu().solve(b); std::cout << "relative error: " << (A*x - b).norm() / b.norm() << std::endl; return 0; } \endcode * */ template<> struct NumTraits : GenericNumTraits { enum { IsInteger = 0, IsSigned = 1, IsComplex = 0, RequireInitialization = 1, ReadCost = 10, AddCost = 10, MulCost = 40 }; typedef mpfr::mpreal Real; typedef mpfr::mpreal NonInteger; inline static Real highest (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::maxval(Precision); } inline static Real lowest (long Precision = mpfr::mpreal::get_default_prec()) { return -mpfr::maxval(Precision); } // Constants inline static Real Pi (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::const_pi(Precision); } inline static Real Euler (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::const_euler(Precision); } inline static Real Log2 (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::const_log2(Precision); } inline static Real Catalan (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::const_catalan(Precision); } inline static Real epsilon (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::machine_epsilon(Precision); } inline static Real epsilon (const Real& x) { return mpfr::machine_epsilon(x); } inline static Real dummy_precision() { unsigned int weak_prec = ((mpfr::mpreal::get_default_prec()-1) * 90) / 100; return mpfr::machine_epsilon(weak_prec); } }; namespace internal { template<> inline mpfr::mpreal random() { return mpfr::random(); } template<> inline mpfr::mpreal random(const mpfr::mpreal& a, const mpfr::mpreal& b) { return a + (b-a) * random(); } inline bool isMuchSmallerThan(const mpfr::mpreal& a, const mpfr::mpreal& b, const mpfr::mpreal& eps) { return mpfr::abs(a) <= mpfr::abs(b) * eps; } inline bool isApprox(const mpfr::mpreal& a, const mpfr::mpreal& b, const mpfr::mpreal& eps) { return mpfr::isEqualFuzzy(a,b,eps); } inline bool isApproxOrLessThan(const mpfr::mpreal& a, const mpfr::mpreal& b, const mpfr::mpreal& eps) { return a <= b || mpfr::isEqualFuzzy(a,b,eps); } template<> inline long double cast(const mpfr::mpreal& x) { return x.toLDouble(); } template<> inline double cast(const mpfr::mpreal& x) { return x.toDouble(); } template<> inline long cast(const mpfr::mpreal& x) { return x.toLong(); } template<> inline int cast(const mpfr::mpreal& x) { return int(x.toLong()); } // Specialize GEBP kernel and traits for mpreal (no need for peeling, nor complicated stuff) // This also permits to directly call mpfr's routines and avoid many temporaries produced by mpreal template<> class gebp_traits { public: typedef mpfr::mpreal ResScalar; enum { nr = 2, // must be 2 for proper packing... 
mr = 1, WorkSpaceFactor = nr, LhsProgress = 1, RhsProgress = 1 }; }; template struct gebp_kernel { typedef mpfr::mpreal mpreal; EIGEN_DONT_INLINE void operator()(mpreal* res, Index resStride, const mpreal* blockA, const mpreal* blockB, Index rows, Index depth, Index cols, mpreal alpha, Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0, mpreal* /*unpackedB*/ = 0) { mpreal acc1, acc2, tmp; if(strideA==-1) strideA = depth; if(strideB==-1) strideB = depth; for(Index j=0; j)(nr,cols-j); mpreal *C1 = res + j*resStride; mpreal *C2 = res + (j+1)*resStride; for(Index i=0; i(blockB) + j*strideB + offsetB*actual_nr; mpreal *A = const_cast(blockA) + i*strideA + offsetA; acc1 = 0; acc2 = 0; for(Index k=0; k // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_LEVENBERGMARQUARDT_MODULE #define EIGEN_LEVENBERGMARQUARDT_MODULE // #include #include #include #include #include #include /** * \defgroup LevenbergMarquardt_Module Levenberg-Marquardt module * * \code * #include * \endcode * * */ #include "Eigen/SparseCore" #ifndef EIGEN_PARSED_BY_DOXYGEN #include "src/LevenbergMarquardt/LMqrsolv.h" #include "src/LevenbergMarquardt/LMcovar.h" #include "src/LevenbergMarquardt/LMpar.h" #endif #include "src/LevenbergMarquardt/LevenbergMarquardt.h" #include "src/LevenbergMarquardt/LMonestep.h" #endif // EIGEN_LEVENBERGMARQUARDT_MODULE RcppEigen/inst/include/unsupported/Eigen/IterativeSolvers0000644000176200001440000000256212253717461023417 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ITERATIVE_SOLVERS_MODULE_H #define EIGEN_ITERATIVE_SOLVERS_MODULE_H #include /** * \defgroup IterativeSolvers_Module Iterative solvers module * This module aims to provide various iterative linear and non linear solver algorithms. * It currently provides: * - a constrained conjugate gradient * - a Householder GMRES implementation * \code * #include * \endcode */ //@{ #include "../../Eigen/src/misc/Solve.h" #include "../../Eigen/src/misc/SparseSolve.h" #ifndef EIGEN_MPL2_ONLY #include "src/IterativeSolvers/IterationController.h" #include "src/IterativeSolvers/ConstrainedConjGrad.h" #endif #include "src/IterativeSolvers/IncompleteLU.h" #include "../../Eigen/Jacobi" #include "../../Eigen/Householder" #include "src/IterativeSolvers/GMRES.h" #include "src/IterativeSolvers/IncompleteCholesky.h" //#include "src/IterativeSolvers/SSORPreconditioner.h" #include "src/IterativeSolvers/MINRES.h" //@} #endif // EIGEN_ITERATIVE_SOLVERS_MODULE_H RcppEigen/inst/include/unsupported/Eigen/MatrixFunctions0000644000176200001440000003713712253717461023250 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Jitse Niesen // Copyright (C) 2012 Chen-Pang He // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
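// A minimal sketch of the GMRES solver advertised by the IterativeSolvers module
// above, assuming it follows the usual IterativeSolverBase interface
// (compute()/solve(), setTolerance(), setMaxIterations()); the system A, b is a
// placeholder.
//
// \code
// #include <Eigen/Sparse>
// #include <unsupported/Eigen/IterativeSolvers>
//
// Eigen::SparseMatrix<double> A;   // assemble a square sparse system ...
// Eigen::VectorXd b;               // ... and its right-hand side
//
// Eigen::GMRES<Eigen::SparseMatrix<double> > solver;
// solver.setTolerance(1e-10);
// solver.setMaxIterations(200);
// solver.compute(A);
// Eigen::VectorXd x = solver.solve(b);
// std::cout << "#iterations: " << solver.iterations()
//           << "  estimated error: " << solver.error() << std::endl;
// \endcode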
#ifndef EIGEN_MATRIX_FUNCTIONS #define EIGEN_MATRIX_FUNCTIONS #include #include #include #include #include #include #include /** * \defgroup MatrixFunctions_Module Matrix functions module * \brief This module aims to provide various methods for the computation of * matrix functions. * * To use this module, add * \code * #include * \endcode * at the start of your source file. * * This module defines the following MatrixBase methods. * - \ref matrixbase_cos "MatrixBase::cos()", for computing the matrix cosine * - \ref matrixbase_cosh "MatrixBase::cosh()", for computing the matrix hyperbolic cosine * - \ref matrixbase_exp "MatrixBase::exp()", for computing the matrix exponential * - \ref matrixbase_log "MatrixBase::log()", for computing the matrix logarithm * - \ref matrixbase_pow "MatrixBase::pow()", for computing the matrix power * - \ref matrixbase_matrixfunction "MatrixBase::matrixFunction()", for computing general matrix functions * - \ref matrixbase_sin "MatrixBase::sin()", for computing the matrix sine * - \ref matrixbase_sinh "MatrixBase::sinh()", for computing the matrix hyperbolic sine * - \ref matrixbase_sqrt "MatrixBase::sqrt()", for computing the matrix square root * * These methods are the main entry points to this module. * * %Matrix functions are defined as follows. Suppose that \f$ f \f$ * is an entire function (that is, a function on the complex plane * that is everywhere complex differentiable). Then its Taylor * series * \f[ f(0) + f'(0) x + \frac{f''(0)}{2} x^2 + \frac{f'''(0)}{3!} x^3 + \cdots \f] * converges to \f$ f(x) \f$. In this case, we can define the matrix * function by the same series: * \f[ f(M) = f(0) + f'(0) M + \frac{f''(0)}{2} M^2 + \frac{f'''(0)}{3!} M^3 + \cdots \f] * */ #include "src/MatrixFunctions/MatrixExponential.h" #include "src/MatrixFunctions/MatrixFunction.h" #include "src/MatrixFunctions/MatrixSquareRoot.h" #include "src/MatrixFunctions/MatrixLogarithm.h" #include "src/MatrixFunctions/MatrixPower.h" /** \page matrixbaseextra_page \ingroup MatrixFunctions_Module \section matrixbaseextra MatrixBase methods defined in the MatrixFunctions module The remainder of the page documents the following MatrixBase methods which are defined in the MatrixFunctions module. \subsection matrixbase_cos MatrixBase::cos() Compute the matrix cosine. \code const MatrixFunctionReturnValue MatrixBase::cos() const \endcode \param[in] M a square matrix. \returns expression representing \f$ \cos(M) \f$. This function calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::cos(). \sa \ref matrixbase_sin "sin()" for an example. \subsection matrixbase_cosh MatrixBase::cosh() Compute the matrix hyberbolic cosine. \code const MatrixFunctionReturnValue MatrixBase::cosh() const \endcode \param[in] M a square matrix. \returns expression representing \f$ \cosh(M) \f$ This function calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::cosh(). \sa \ref matrixbase_sinh "sinh()" for an example. \subsection matrixbase_exp MatrixBase::exp() Compute the matrix exponential. \code const MatrixExponentialReturnValue MatrixBase::exp() const \endcode \param[in] M matrix whose exponential is to be computed. \returns expression representing the matrix exponential of \p M. The matrix exponential of \f$ M \f$ is defined by \f[ \exp(M) = \sum_{k=0}^\infty \frac{M^k}{k!}. 
\f] The matrix exponential can be used to solve linear ordinary differential equations: the solution of \f$ y' = My \f$ with the initial condition \f$ y(0) = y_0 \f$ is given by \f$ y(t) = \exp(M) y_0 \f$. The cost of the computation is approximately \f$ 20 n^3 \f$ for matrices of size \f$ n \f$. The number 20 depends weakly on the norm of the matrix. The matrix exponential is computed using the scaling-and-squaring method combined with Padé approximation. The matrix is first rescaled, then the exponential of the reduced matrix is computed approximant, and then the rescaling is undone by repeated squaring. The degree of the Padé approximant is chosen such that the approximation error is less than the round-off error. However, errors may accumulate during the squaring phase. Details of the algorithm can be found in: Nicholas J. Higham, "The scaling and squaring method for the matrix exponential revisited," SIAM J. %Matrix Anal. Applic., 26:1179–1193, 2005. Example: The following program checks that \f[ \exp \left[ \begin{array}{ccc} 0 & \frac14\pi & 0 \\ -\frac14\pi & 0 & 0 \\ 0 & 0 & 0 \end{array} \right] = \left[ \begin{array}{ccc} \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\ \frac12\sqrt2 & \frac12\sqrt2 & 0 \\ 0 & 0 & 1 \end{array} \right]. \f] This corresponds to a rotation of \f$ \frac14\pi \f$ radians around the z-axis. \include MatrixExponential.cpp Output: \verbinclude MatrixExponential.out \note \p M has to be a matrix of \c float, \c double, \c long double \c complex, \c complex, or \c complex . \subsection matrixbase_log MatrixBase::log() Compute the matrix logarithm. \code const MatrixLogarithmReturnValue MatrixBase::log() const \endcode \param[in] M invertible matrix whose logarithm is to be computed. \returns expression representing the matrix logarithm root of \p M. The matrix logarithm of \f$ M \f$ is a matrix \f$ X \f$ such that \f$ \exp(X) = M \f$ where exp denotes the matrix exponential. As for the scalar logarithm, the equation \f$ \exp(X) = M \f$ may have multiple solutions; this function returns a matrix whose eigenvalues have imaginary part in the interval \f$ (-\pi,\pi] \f$. In the real case, the matrix \f$ M \f$ should be invertible and it should have no eigenvalues which are real and negative (pairs of complex conjugate eigenvalues are allowed). In the complex case, it only needs to be invertible. This function computes the matrix logarithm using the Schur-Parlett algorithm as implemented by MatrixBase::matrixFunction(). The logarithm of an atomic block is computed by MatrixLogarithmAtomic, which uses direct computation for 1-by-1 and 2-by-2 blocks and an inverse scaling-and-squaring algorithm for bigger blocks, with the square roots computed by MatrixBase::sqrt(). Details of the algorithm can be found in Section 11.6.2 of: Nicholas J. Higham, Functions of Matrices: Theory and Computation, SIAM 2008. ISBN 978-0-898716-46-7. Example: The following program checks that \f[ \log \left[ \begin{array}{ccc} \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\ \frac12\sqrt2 & \frac12\sqrt2 & 0 \\ 0 & 0 & 1 \end{array} \right] = \left[ \begin{array}{ccc} 0 & \frac14\pi & 0 \\ -\frac14\pi & 0 & 0 \\ 0 & 0 & 0 \end{array} \right]. \f] This corresponds to a rotation of \f$ \frac14\pi \f$ radians around the z-axis. This is the inverse of the example used in the documentation of \ref matrixbase_exp "exp()". \include MatrixLogarithm.cpp Output: \verbinclude MatrixLogarithm.out \note \p M has to be a matrix of \c float, \c double, long double, \c complex, \c complex, or \c complex . 
\sa MatrixBase::exp(), MatrixBase::matrixFunction(), class MatrixLogarithmAtomic, MatrixBase::sqrt(). \subsection matrixbase_pow MatrixBase::pow() Compute the matrix raised to arbitrary real power. \code const MatrixPowerReturnValue MatrixBase::pow(RealScalar p) const \endcode \param[in] M base of the matrix power, should be a square matrix. \param[in] p exponent of the matrix power, should be real. The matrix power \f$ M^p \f$ is defined as \f$ \exp(p \log(M)) \f$, where exp denotes the matrix exponential, and log denotes the matrix logarithm. The matrix \f$ M \f$ should meet the conditions to be an argument of matrix logarithm. If \p p is not of the real scalar type of \p M, it is casted into the real scalar type of \p M. This function computes the matrix power using the Schur-Padé algorithm as implemented by class MatrixPower. The exponent is split into integral part and fractional part, where the fractional part is in the interval \f$ (-1, 1) \f$. The main diagonal and the first super-diagonal is directly computed. Details of the algorithm can be found in: Nicholas J. Higham and Lijing Lin, "A Schur-Padé algorithm for fractional powers of a matrix," SIAM J. %Matrix Anal. Applic., 32(3):1056–1078, 2011. Example: The following program checks that \f[ \left[ \begin{array}{ccc} \cos1 & -\sin1 & 0 \\ \sin1 & \cos1 & 0 \\ 0 & 0 & 1 \end{array} \right]^{\frac14\pi} = \left[ \begin{array}{ccc} \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\ \frac12\sqrt2 & \frac12\sqrt2 & 0 \\ 0 & 0 & 1 \end{array} \right]. \f] This corresponds to \f$ \frac14\pi \f$ rotations of 1 radian around the z-axis. \include MatrixPower.cpp Output: \verbinclude MatrixPower.out MatrixBase::pow() is user-friendly. However, there are some circumstances under which you should use class MatrixPower directly. MatrixPower can save the result of Schur decomposition, so it's better for computing various powers for the same matrix. Example: \include MatrixPower_optimal.cpp Output: \verbinclude MatrixPower_optimal.out \note \p M has to be a matrix of \c float, \c double, long double, \c complex, \c complex, or \c complex . \sa MatrixBase::exp(), MatrixBase::log(), class MatrixPower. \subsection matrixbase_matrixfunction MatrixBase::matrixFunction() Compute a matrix function. \code const MatrixFunctionReturnValue MatrixBase::matrixFunction(typename internal::stem_function::Scalar>::type f) const \endcode \param[in] M argument of matrix function, should be a square matrix. \param[in] f an entire function; \c f(x,n) should compute the n-th derivative of f at x. \returns expression representing \p f applied to \p M. Suppose that \p M is a matrix whose entries have type \c Scalar. Then, the second argument, \p f, should be a function with prototype \code ComplexScalar f(ComplexScalar, int) \endcode where \c ComplexScalar = \c std::complex if \c Scalar is real (e.g., \c float or \c double) and \c ComplexScalar = \c Scalar if \c Scalar is complex. The return value of \c f(x,n) should be \f$ f^{(n)}(x) \f$, the n-th derivative of f at x. This routine uses the algorithm described in: Philip Davies and Nicholas J. Higham, "A Schur-Parlett algorithm for computing matrix functions", SIAM J. %Matrix Anal. Applic., 25:464–485, 2003. The actual work is done by the MatrixFunction class. 
Example: The following program checks that \f[ \exp \left[ \begin{array}{ccc} 0 & \frac14\pi & 0 \\ -\frac14\pi & 0 & 0 \\ 0 & 0 & 0 \end{array} \right] = \left[ \begin{array}{ccc} \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\ \frac12\sqrt2 & \frac12\sqrt2 & 0 \\ 0 & 0 & 1 \end{array} \right]. \f] This corresponds to a rotation of \f$ \frac14\pi \f$ radians around the z-axis. This is the same example as used in the documentation of \ref matrixbase_exp "exp()". \include MatrixFunction.cpp Output: \verbinclude MatrixFunction.out Note that the function \c expfn is defined for complex numbers \c x, even though the matrix \c A is over the reals. Instead of \c expfn, we could also have used StdStemFunctions::exp: \code A.matrixFunction(StdStemFunctions >::exp, &B); \endcode \subsection matrixbase_sin MatrixBase::sin() Compute the matrix sine. \code const MatrixFunctionReturnValue MatrixBase::sin() const \endcode \param[in] M a square matrix. \returns expression representing \f$ \sin(M) \f$. This function calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::sin(). Example: \include MatrixSine.cpp Output: \verbinclude MatrixSine.out \subsection matrixbase_sinh MatrixBase::sinh() Compute the matrix hyperbolic sine. \code MatrixFunctionReturnValue MatrixBase::sinh() const \endcode \param[in] M a square matrix. \returns expression representing \f$ \sinh(M) \f$ This function calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::sinh(). Example: \include MatrixSinh.cpp Output: \verbinclude MatrixSinh.out \subsection matrixbase_sqrt MatrixBase::sqrt() Compute the matrix square root. \code const MatrixSquareRootReturnValue MatrixBase::sqrt() const \endcode \param[in] M invertible matrix whose square root is to be computed. \returns expression representing the matrix square root of \p M. The matrix square root of \f$ M \f$ is the matrix \f$ M^{1/2} \f$ whose square is the original matrix; so if \f$ S = M^{1/2} \f$ then \f$ S^2 = M \f$. In the real case, the matrix \f$ M \f$ should be invertible and it should have no eigenvalues which are real and negative (pairs of complex conjugate eigenvalues are allowed). In that case, the matrix has a square root which is also real, and this is the square root computed by this function. The matrix square root is computed by first reducing the matrix to quasi-triangular form with the real Schur decomposition. The square root of the quasi-triangular matrix can then be computed directly. The cost is approximately \f$ 25 n^3 \f$ real flops for the real Schur decomposition and \f$ 3\frac13 n^3 \f$ real flops for the remainder (though the computation time in practice is likely more than this indicates). Details of the algorithm can be found in: Nicholas J. Highan, "Computing real square roots of a real matrix", Linear Algebra Appl., 88/89:405–430, 1987. If the matrix is positive-definite symmetric, then the square root is also positive-definite symmetric. In this case, it is best to use SelfAdjointEigenSolver::operatorSqrt() to compute it. In the complex case, the matrix \f$ M \f$ should be invertible; this is a restriction of the algorithm. The square root computed by this algorithm is the one whose eigenvalues have an argument in the interval \f$ (-\frac12\pi, \frac12\pi] \f$. This is the usual branch cut. The computation is the same as in the real case, except that the complex Schur decomposition is used to reduce the matrix to a triangular matrix. The theoretical cost is the same. 
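As an aside, a minimal sketch contrasting the general Schur-based square root with the SelfAdjointEigenSolver route recommended above for symmetric positive-definite input (the matrix values are illustrative); the reference below describes the underlying Schur algorithm:

\code
#include <iostream>
#include <Eigen/Eigenvalues>
#include <unsupported/Eigen/MatrixFunctions>

int main()
{
  Eigen::Matrix2d A;
  A << 4, 1,
       1, 3;                                   // a symmetric positive-definite matrix
  Eigen::Matrix2d S1 = A.sqrt();               // general-purpose Schur-based square root
  Eigen::SelfAdjointEigenSolver<Eigen::Matrix2d> es(A);
  Eigen::Matrix2d S2 = es.operatorSqrt();      // preferred route for SPD matrices
  std::cout << "Schur-based sqrt:\n" << S1
            << "\noperatorSqrt:\n"   << S2 << "\n";
}
\endcode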
Details are in: Åke Björck and Sven Hammarling, "A Schur method for the square root of a matrix", Linear Algebra Appl., 52/53:127–140, 1983. Example: The following program checks that the square root of \f[ \left[ \begin{array}{cc} \cos(\frac13\pi) & -\sin(\frac13\pi) \\ \sin(\frac13\pi) & \cos(\frac13\pi) \end{array} \right], \f] corresponding to a rotation over 60 degrees, is a rotation over 30 degrees: \f[ \left[ \begin{array}{cc} \cos(\frac16\pi) & -\sin(\frac16\pi) \\ \sin(\frac16\pi) & \cos(\frac16\pi) \end{array} \right]. \f] \include MatrixSquareRoot.cpp Output: \verbinclude MatrixSquareRoot.out \sa class RealSchur, class ComplexSchur, class MatrixSquareRoot, SelfAdjointEigenSolver::operatorSqrt(). */ #endif // EIGEN_MATRIX_FUNCTIONS RcppEigen/inst/include/unsupported/Eigen/AutoDiff0000644000176200001440000000203612253717461021602 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_AUTODIFF_MODULE #define EIGEN_AUTODIFF_MODULE namespace Eigen { /** * \defgroup AutoDiff_Module Auto Diff module * * This module features forward automatic differentation via a simple * templated scalar type wrapper AutoDiffScalar. * * Warning : this should NOT be confused with numerical differentiation, which * is a different method and has its own module in Eigen : \ref NumericalDiff_Module. * * \code * #include * \endcode */ //@{ } #include "src/AutoDiff/AutoDiffScalar.h" // #include "src/AutoDiff/AutoDiffVector.h" #include "src/AutoDiff/AutoDiffJacobian.h" namespace Eigen { //@} } #endif // EIGEN_AUTODIFF_MODULE RcppEigen/inst/include/unsupported/Eigen/ArpackSupport0000644000176200001440000000161112446006401022662 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ARPACKSUPPORT_MODULE_H #define EIGEN_ARPACKSUPPORT_MODULE_H #include #include /** \defgroup ArpackSupport_Module Arpack support module * * This module provides a wrapper to Arpack, a library for sparse eigenvalue decomposition. * * \code * #include * \endcode */ #include #include "src/Eigenvalues/ArpackSelfAdjointEigenSolver.h" #include #endif // EIGEN_ARPACKSUPPORT_MODULE_H /* vim: set filetype=cpp et sw=2 ts=2 ai: */ RcppEigen/inst/include/unsupported/Eigen/NonLinearOptimization0000644000176200001440000001322012253717461024372 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Thomas Capricelli // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_NONLINEAROPTIMIZATION_MODULE #define EIGEN_NONLINEAROPTIMIZATION_MODULE #include #include #include #include #include /** * \defgroup NonLinearOptimization_Module Non linear optimization module * * \code * #include * \endcode * * This module provides implementation of two important algorithms in non linear * optimization. 
In both cases, we consider a system of non linear functions. Of * course, this should work, and even work very well if those functions are * actually linear. But if this is so, you should probably better use other * methods more fitted to this special case. * * One algorithm allows to find an extremum of such a system (Levenberg * Marquardt algorithm) and the second one is used to find * a zero for the system (Powell hybrid "dogleg" method). * * This code is a port of minpack (http://en.wikipedia.org/wiki/MINPACK). * Minpack is a very famous, old, robust and well-reknown package, written in * fortran. Those implementations have been carefully tuned, tested, and used * for several decades. * * The original fortran code was automatically translated using f2c (http://en.wikipedia.org/wiki/F2c) in C, * then c++, and then cleaned by several different authors. * The last one of those cleanings being our starting point : * http://devernay.free.fr/hacks/cminpack.html * * Finally, we ported this code to Eigen, creating classes and API * coherent with Eigen. When possible, we switched to Eigen * implementation, such as most linear algebra (vectors, matrices, stable norms). * * Doing so, we were very careful to check the tests we setup at the very * beginning, which ensure that the same results are found. * * \section Tests Tests * * The tests are placed in the file unsupported/test/NonLinear.cpp. * * There are two kinds of tests : those that come from examples bundled with cminpack. * They guaranty we get the same results as the original algorithms (value for 'x', * for the number of evaluations of the function, and for the number of evaluations * of the jacobian if ever). * * Other tests were added by myself at the very beginning of the * process and check the results for levenberg-marquardt using the reference data * on http://www.itl.nist.gov/div898/strd/nls/nls_main.shtml. Since then i've * carefully checked that the same results were obtained when modifiying the * code. Please note that we do not always get the exact same decimals as they do, * but this is ok : they use 128bits float, and we do the tests using the C type 'double', * which is 64 bits on most platforms (x86 and amd64, at least). * I've performed those tests on several other implementations of levenberg-marquardt, and * (c)minpack performs VERY well compared to those, both in accuracy and speed. * * The documentation for running the tests is on the wiki * http://eigen.tuxfamily.org/index.php?title=Tests * * \section API API : overview of methods * * Both algorithms can use either the jacobian (provided by the user) or compute * an approximation by themselves (actually using Eigen \ref NumericalDiff_Module). * The part of API referring to the latter use 'NumericalDiff' in the method names * (exemple: LevenbergMarquardt.minimizeNumericalDiff() ) * * The methods LevenbergMarquardt.lmder1()/lmdif1()/lmstr1() and * HybridNonLinearSolver.hybrj1()/hybrd1() are specific methods from the original * minpack package that you probably should NOT use until you are porting a code that * was previously using minpack. They just define a 'simple' API with default values * for some parameters. * * All algorithms are provided using Two APIs : * - one where the user inits the algorithm, and uses '*OneStep()' as much as he wants : * this way the caller have control over the steps * - one where the user just calls a method (optimize() or solve()) which will * handle the loop: init + loop until a stop condition is met. 
Those are provided for * convenience. * * As an example, the method LevenbergMarquardt::minimize() is * implemented as follow : * \code * Status LevenbergMarquardt::minimize(FVectorType &x, const int mode) * { * Status status = minimizeInit(x, mode); * do { * status = minimizeOneStep(x, mode); * } while (status==Running); * return status; * } * \endcode * * \section examples Examples * * The easiest way to understand how to use this module is by looking at the many examples in the file * unsupported/test/NonLinearOptimization.cpp. */ #ifndef EIGEN_PARSED_BY_DOXYGEN #include "src/NonLinearOptimization/qrsolv.h" #include "src/NonLinearOptimization/r1updt.h" #include "src/NonLinearOptimization/r1mpyq.h" #include "src/NonLinearOptimization/rwupdt.h" #include "src/NonLinearOptimization/fdjac1.h" #include "src/NonLinearOptimization/lmpar.h" #include "src/NonLinearOptimization/dogleg.h" #include "src/NonLinearOptimization/covar.h" #include "src/NonLinearOptimization/chkder.h" #endif #include "src/NonLinearOptimization/HybridNonLinearSolver.h" #include "src/NonLinearOptimization/LevenbergMarquardt.h" #endif // EIGEN_NONLINEAROPTIMIZATION_MODULE RcppEigen/inst/include/unsupported/Eigen/Polynomials0000644000176200001440000001124212253717461022406 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_POLYNOMIALS_MODULE_H #define EIGEN_POLYNOMIALS_MODULE_H #include #include #include // Note that EIGEN_HIDE_HEAVY_CODE has to be defined per module #if (defined EIGEN_EXTERN_INSTANTIATIONS) && (EIGEN_EXTERN_INSTANTIATIONS>=2) #ifndef EIGEN_HIDE_HEAVY_CODE #define EIGEN_HIDE_HEAVY_CODE #endif #elif defined EIGEN_HIDE_HEAVY_CODE #undef EIGEN_HIDE_HEAVY_CODE #endif /** * \defgroup Polynomials_Module Polynomials module * \brief This module provides a QR based polynomial solver. * * To use this module, add * \code * #include * \endcode * at the start of your source file. */ #include "src/Polynomials/PolynomialUtils.h" #include "src/Polynomials/Companion.h" #include "src/Polynomials/PolynomialSolver.h" /** \page polynomials Polynomials defines functions for dealing with polynomials and a QR based polynomial solver. \ingroup Polynomials_Module The remainder of the page documents first the functions for evaluating, computing polynomials, computing estimates about polynomials and next the QR based polynomial solver. \section polynomialUtils convenient functions to deal with polynomials \subsection roots_to_monicPolynomial The function \code void roots_to_monicPolynomial( const RootVector& rv, Polynomial& poly ) \endcode computes the coefficients \f$ a_i \f$ of \f$ p(x) = a_0 + a_{1}x + ... + a_{n-1}x^{n-1} + x^n \f$ where \f$ p \f$ is known through its roots i.e. \f$ p(x) = (x-r_1)(x-r_2)...(x-r_n) \f$. \subsection poly_eval The function \code T poly_eval( const Polynomials& poly, const T& x ) \endcode evaluates a polynomial at a given point using stabilized Hörner method. The following code: first computes the coefficients in the monomial basis of the monic polynomial that has the provided roots; then, it evaluates the computed polynomial, using a stabilized Hörner method. 
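A minimal sketch of those two steps in isolation (the roots are illustrative; the bundled PolynomialUtils1.cpp example that follows does the same thing):

\code
#include <iostream>
#include <unsupported/Eigen/Polynomials>

int main()
{
  Eigen::VectorXd roots(3);
  roots << 1.0, 2.0, 3.0;                       // p(x) = (x-1)(x-2)(x-3)
  Eigen::VectorXd coeffs;                       // filled with a_0, a_1, ..., a_{n-1}, 1
  Eigen::roots_to_monicPolynomial(roots, coeffs);
  std::cout << "coefficients (increasing degree): " << coeffs.transpose() << "\n";
  std::cout << "p(2) = " << Eigen::poly_eval(coeffs, 2.0) << "\n";   // should be 0
}
\endcode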
\include PolynomialUtils1.cpp Output: \verbinclude PolynomialUtils1.out \subsection Cauchy bounds The function \code Real cauchy_max_bound( const Polynomial& poly ) \endcode provides a maximum bound (the Cauchy one: \f$C(p)\f$) for the absolute value of a root of the given polynomial i.e. \f$ \forall r_i \f$ root of \f$ p(x) = \sum_{k=0}^d a_k x^k \f$, \f$ |r_i| \le C(p) = \sum_{k=0}^{d} \left | \frac{a_k}{a_d} \right | \f$ The leading coefficient \f$ p \f$: should be non zero \f$a_d \neq 0\f$. The function \code Real cauchy_min_bound( const Polynomial& poly ) \endcode provides a minimum bound (the Cauchy one: \f$c(p)\f$) for the absolute value of a non zero root of the given polynomial i.e. \f$ \forall r_i \neq 0 \f$ root of \f$ p(x) = \sum_{k=0}^d a_k x^k \f$, \f$ |r_i| \ge c(p) = \left( \sum_{k=0}^{d} \left | \frac{a_k}{a_0} \right | \right)^{-1} \f$ \section QR polynomial solver class Computes the complex roots of a polynomial by computing the eigenvalues of the associated companion matrix with the QR algorithm. The roots of \f$ p(x) = a_0 + a_1 x + a_2 x^2 + a_{3} x^3 + x^4 \f$ are the eigenvalues of \f$ \left [ \begin{array}{cccc} 0 & 0 & 0 & a_0 \\ 1 & 0 & 0 & a_1 \\ 0 & 1 & 0 & a_2 \\ 0 & 0 & 1 & a_3 \end{array} \right ] \f$ However, the QR algorithm is not guaranteed to converge when there are several eigenvalues with same modulus. Therefore the current polynomial solver is guaranteed to provide a correct result only when the complex roots \f$r_1,r_2,...,r_d\f$ have distinct moduli i.e. \f$ \forall i,j \in [1;d],~ \| r_i \| \neq \| r_j \| \f$. With 32bit (float) floating types this problem shows up frequently. However, almost always, correct accuracy is reached even in these cases for 64bit (double) floating types and small polynomial degree (<20). \include PolynomialSolver1.cpp In the above example: -# a simple use of the polynomial solver is shown; -# the accuracy problem with the QR algorithm is presented: a polynomial with almost conjugate roots is provided to the solver. Those roots have almost same module therefore the QR algorithm failed to converge: the accuracy of the last root is bad; -# a simple way to circumvent the problem is shown: use doubles instead of floats. Output: \verbinclude PolynomialSolver1.out */ #include #endif // EIGEN_POLYNOMIALS_MODULE_H /* vim: set filetype=cpp et sw=2 ts=2 ai: */ RcppEigen/inst/include/unsupported/Eigen/FFT0000644000176200001440000003313412253717461020523 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Mark Borgerding mark a borgerding net // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_FFT_H #define EIGEN_FFT_H #include #include #include #include /** * \defgroup FFT_Module Fast Fourier Transform module * * \code * #include * \endcode * * This module provides Fast Fourier transformation, with a configurable backend * implementation. * * The default implementation is based on kissfft. It is a small, free, and * reasonably efficient default. * * There are currently two implementation backend: * * - fftw (http://www.fftw.org) : faster, GPL -- incompatible with Eigen in LGPL form, bigger code size. * - MKL (http://en.wikipedia.org/wiki/Math_Kernel_Library) : fastest, commercial -- may be incompatible with Eigen in GPL form. 
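*
* A minimal usage sketch with the default kissfft backend (the input signal is illustrative):
*
* \code
* #include <vector>
* #include <complex>
* #include <unsupported/Eigen/FFT>
*
* int main()
* {
*   Eigen::FFT<float> fft;
*   std::vector<float> timevec(32, 1.0f);          // constant signal: energy in the DC bin only
*   std::vector<std::complex<float> > freqvec;
*   fft.fwd(freqvec, timevec);                     // forward transform (full spectrum by default)
*   std::vector<float> back;
*   fft.inv(back, freqvec);                        // inverse transform; back reproduces timevec
* }
* \endcode
*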
* * \section FFTDesign Design * * The following design decisions were made concerning scaling and * half-spectrum for real FFT. * * The intent is to facilitate generic programming and ease migrating code * from Matlab/octave. * We think the default behavior of Eigen/FFT should favor correctness and * generality over speed. Of course, the caller should be able to "opt-out" from this * behavior and get the speed increase if they want it. * * 1) %Scaling: * Other libraries (FFTW,IMKL,KISSFFT) do not perform scaling, so there * is a constant gain incurred after the forward&inverse transforms , so * IFFT(FFT(x)) = Kx; this is done to avoid a vector-by-value multiply. * The downside is that algorithms that worked correctly in Matlab/octave * don't behave the same way once implemented in C++. * * How Eigen/FFT differs: invertible scaling is performed so IFFT( FFT(x) ) = x. * * 2) Real FFT half-spectrum * Other libraries use only half the frequency spectrum (plus one extra * sample for the Nyquist bin) for a real FFT, the other half is the * conjugate-symmetric of the first half. This saves them a copy and some * memory. The downside is the caller needs to have special logic for the * number of bins in complex vs real. * * How Eigen/FFT differs: The full spectrum is returned from the forward * transform. This facilitates generic template programming by obviating * separate specializations for real vs complex. On the inverse * transform, only half the spectrum is actually used if the output type is real. */ #ifdef EIGEN_FFTW_DEFAULT // FFTW: faster, GPL -- incompatible with Eigen in LGPL form, bigger code size # include # include "src/FFT/ei_fftw_impl.h" namespace Eigen { //template typedef struct internal::fftw_impl default_fft_impl; this does not work template struct default_fft_impl : public internal::fftw_impl {}; } #elif defined EIGEN_MKL_DEFAULT // TODO // intel Math Kernel Library: fastest, commercial -- may be incompatible with Eigen in GPL form # include "src/FFT/ei_imklfft_impl.h" namespace Eigen { template struct default_fft_impl : public internal::imklfft_impl {}; } #else // internal::kissfft_impl: small, free, reasonably efficient default, derived from kissfft // # include "src/FFT/ei_kissfft_impl.h" namespace Eigen { template struct default_fft_impl : public internal::kissfft_impl {}; } #endif namespace Eigen { // template struct fft_fwd_proxy; template struct fft_inv_proxy; namespace internal { template struct traits< fft_fwd_proxy > { typedef typename T_SrcMat::PlainObject ReturnType; }; template struct traits< fft_inv_proxy > { typedef typename T_SrcMat::PlainObject ReturnType; }; } template struct fft_fwd_proxy : public ReturnByValue > { typedef DenseIndex Index; fft_fwd_proxy(const T_SrcMat& src,T_FftIfc & fft, Index nfft) : m_src(src),m_ifc(fft), m_nfft(nfft) {} template void evalTo(T_DestMat& dst) const; Index rows() const { return m_src.rows(); } Index cols() const { return m_src.cols(); } protected: const T_SrcMat & m_src; T_FftIfc & m_ifc; Index m_nfft; private: fft_fwd_proxy& operator=(const fft_fwd_proxy&); }; template struct fft_inv_proxy : public ReturnByValue > { typedef DenseIndex Index; fft_inv_proxy(const T_SrcMat& src,T_FftIfc & fft, Index nfft) : m_src(src),m_ifc(fft), m_nfft(nfft) {} template void evalTo(T_DestMat& dst) const; Index rows() const { return m_src.rows(); } Index cols() const { return m_src.cols(); } protected: const T_SrcMat & m_src; T_FftIfc & m_ifc; Index m_nfft; private: fft_inv_proxy& operator=(const fft_inv_proxy&); }; template > class FFT { 
public: typedef T_Impl impl_type; typedef DenseIndex Index; typedef typename impl_type::Scalar Scalar; typedef typename impl_type::Complex Complex; enum Flag { Default=0, // goof proof Unscaled=1, HalfSpectrum=2, // SomeOtherSpeedOptimization=4 Speedy=32767 }; FFT( const impl_type & impl=impl_type() , Flag flags=Default ) :m_impl(impl),m_flag(flags) { } inline bool HasFlag(Flag f) const { return (m_flag & (int)f) == f;} inline void SetFlag(Flag f) { m_flag |= (int)f;} inline void ClearFlag(Flag f) { m_flag &= (~(int)f);} inline void fwd( Complex * dst, const Scalar * src, Index nfft) { m_impl.fwd(dst,src,static_cast(nfft)); if ( HasFlag(HalfSpectrum) == false) ReflectSpectrum(dst,nfft); } inline void fwd( Complex * dst, const Complex * src, Index nfft) { m_impl.fwd(dst,src,static_cast(nfft)); } /* inline void fwd2(Complex * dst, const Complex * src, int n0,int n1) { m_impl.fwd2(dst,src,n0,n1); } */ template inline void fwd( std::vector & dst, const std::vector<_Input> & src) { if ( NumTraits<_Input>::IsComplex == 0 && HasFlag(HalfSpectrum) ) dst.resize( (src.size()>>1)+1); // half the bins + Nyquist bin else dst.resize(src.size()); fwd(&dst[0],&src[0],src.size()); } template inline void fwd( MatrixBase & dst, const MatrixBase & src, Index nfft=-1) { typedef typename ComplexDerived::Scalar dst_type; typedef typename InputDerived::Scalar src_type; EIGEN_STATIC_ASSERT_VECTOR_ONLY(InputDerived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(ComplexDerived) EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(ComplexDerived,InputDerived) // size at compile-time EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) EIGEN_STATIC_ASSERT(int(InputDerived::Flags)&int(ComplexDerived::Flags)&DirectAccessBit, THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_WITH_DIRECT_MEMORY_ACCESS_SUCH_AS_MAP_OR_PLAIN_MATRICES) if (nfft<1) nfft = src.size(); if ( NumTraits< src_type >::IsComplex == 0 && HasFlag(HalfSpectrum) ) dst.derived().resize( (nfft>>1)+1); else dst.derived().resize(nfft); if ( src.innerStride() != 1 || src.size() < nfft ) { Matrix tmp; if (src.size() inline fft_fwd_proxy< MatrixBase, FFT > fwd( const MatrixBase & src, Index nfft=-1) { return fft_fwd_proxy< MatrixBase ,FFT >( src, *this,nfft ); } template inline fft_inv_proxy< MatrixBase, FFT > inv( const MatrixBase & src, Index nfft=-1) { return fft_inv_proxy< MatrixBase ,FFT >( src, *this,nfft ); } inline void inv( Complex * dst, const Complex * src, Index nfft) { m_impl.inv( dst,src,static_cast(nfft) ); if ( HasFlag( Unscaled ) == false) scale(dst,Scalar(1./nfft),nfft); // scale the time series } inline void inv( Scalar * dst, const Complex * src, Index nfft) { m_impl.inv( dst,src,static_cast(nfft) ); if ( HasFlag( Unscaled ) == false) scale(dst,Scalar(1./nfft),nfft); // scale the time series } template inline void inv( MatrixBase & dst, const MatrixBase & src, Index nfft=-1) { typedef typename ComplexDerived::Scalar src_type; typedef typename OutputDerived::Scalar dst_type; const bool realfft= (NumTraits::IsComplex == 0); EIGEN_STATIC_ASSERT_VECTOR_ONLY(OutputDerived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(ComplexDerived) EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(ComplexDerived,OutputDerived) // size at compile-time EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) EIGEN_STATIC_ASSERT(int(OutputDerived::Flags)&int(ComplexDerived::Flags)&DirectAccessBit, 
THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_WITH_DIRECT_MEMORY_ACCESS_SUCH_AS_MAP_OR_PLAIN_MATRICES) if (nfft<1) { //automatic FFT size determination if ( realfft && HasFlag(HalfSpectrum) ) nfft = 2*(src.size()-1); //assume even fft size else nfft = src.size(); } dst.derived().resize( nfft ); // check for nfft that does not fit the input data size Index resize_input= ( realfft && HasFlag(HalfSpectrum) ) ? ( (nfft/2+1) - src.size() ) : ( nfft - src.size() ); if ( src.innerStride() != 1 || resize_input ) { // if the vector is strided, then we need to copy it to a packed temporary Matrix tmp; if ( resize_input ) { size_t ncopy = (std::min)(src.size(),src.size() + resize_input); tmp.setZero(src.size() + resize_input); if ( realfft && HasFlag(HalfSpectrum) ) { // pad at the Nyquist bin tmp.head(ncopy) = src.head(ncopy); tmp(ncopy-1) = real(tmp(ncopy-1)); // enforce real-only Nyquist bin }else{ size_t nhead,ntail; nhead = 1+ncopy/2-1; // range [0:pi) ntail = ncopy/2-1; // range (-pi:0) tmp.head(nhead) = src.head(nhead); tmp.tail(ntail) = src.tail(ntail); if (resize_input<0) { //shrinking -- create the Nyquist bin as the average of the two bins that fold into it tmp(nhead) = ( src(nfft/2) + src( src.size() - nfft/2 ) )*src_type(.5); }else{ // expanding -- split the old Nyquist bin into two halves tmp(nhead) = src(nhead) * src_type(.5); tmp(tmp.size()-nhead) = tmp(nhead); } } }else{ tmp = src; } inv( &dst[0],&tmp[0], nfft); }else{ inv( &dst[0],&src[0], nfft); } } template inline void inv( std::vector<_Output> & dst, const std::vector & src,Index nfft=-1) { if (nfft<1) nfft = ( NumTraits<_Output>::IsComplex == 0 && HasFlag(HalfSpectrum) ) ? 2*(src.size()-1) : src.size(); dst.resize( nfft ); inv( &dst[0],&src[0],nfft); } /* // TODO: multi-dimensional FFTs inline void inv2(Complex * dst, const Complex * src, int n0,int n1) { m_impl.inv2(dst,src,n0,n1); if ( HasFlag( Unscaled ) == false) scale(dst,1./(n0*n1),n0*n1); } */ inline impl_type & impl() {return m_impl;} private: template inline void scale(T_Data * x,Scalar s,Index nx) { #if 1 for (int k=0;k::Map(x,nx) *= s; else Matrix::MapAligned(x,nx) *= s; //Matrix::Map(x,nx) * s; #endif } inline void ReflectSpectrum(Complex * freq, Index nfft) { // create the implicit right-half spectrum (conjugate-mirror of the left-half) Index nhbins=(nfft>>1)+1; for (Index k=nhbins;k < nfft; ++k ) freq[k] = conj(freq[nfft-k]); } impl_type m_impl; int m_flag; }; template template inline void fft_fwd_proxy::evalTo(T_DestMat& dst) const { m_ifc.fwd( dst, m_src, m_nfft); } template template inline void fft_inv_proxy::evalTo(T_DestMat& dst) const { m_ifc.inv( dst, m_src, m_nfft); } } #endif /* vim: set filetype=cpp et sw=2 ts=2 ai: */ RcppEigen/inst/include/unsupported/Eigen/SparseExtra0000644000176200001440000000260312253717461022342 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_SPARSE_EXTRA_MODULE_H #define EIGEN_SPARSE_EXTRA_MODULE_H #include "../../Eigen/Sparse" #include "../../Eigen/src/Core/util/DisableStupidWarnings.h" #include #include #include #include #include #include #include #ifdef EIGEN_GOOGLEHASH_SUPPORT #include #endif /** * \defgroup SparseExtra_Module SparseExtra module * * This module contains some experimental features extending the sparse module. * * \code * #include * \endcode */ #include "../../Eigen/src/misc/Solve.h" #include "../../Eigen/src/misc/SparseSolve.h" #include "src/SparseExtra/DynamicSparseMatrix.h" #include "src/SparseExtra/BlockOfDynamicSparseMatrix.h" #include "src/SparseExtra/RandomSetter.h" #include "src/SparseExtra/MarketIO.h" #if !defined(_WIN32) #include #include "src/SparseExtra/MatrixMarketIterator.h" #endif #include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_SPARSE_EXTRA_MODULE_H RcppEigen/inst/include/unsupported/Eigen/Skyline0000644000176200001440000000162012253717461021515 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SKYLINE_MODULE_H #define EIGEN_SKYLINE_MODULE_H #include "Eigen/Core" #include "Eigen/src/Core/util/DisableStupidWarnings.h" #include #include #include #include /** * \defgroup Skyline_Module Skyline module * * * * */ #include "src/Skyline/SkylineUtil.h" #include "src/Skyline/SkylineMatrixBase.h" #include "src/Skyline/SkylineStorage.h" #include "src/Skyline/SkylineMatrix.h" #include "src/Skyline/SkylineInplaceLU.h" #include "src/Skyline/SkylineProduct.h" #include "Eigen/src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_SKYLINE_MODULE_H RcppEigen/inst/include/unsupported/Eigen/OpenGLSupport0000644000176200001440000004515212446006401022615 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_OPENGL_MODULE #define EIGEN_OPENGL_MODULE #include #if defined(__APPLE_CC__) #include #else #include #endif namespace Eigen { /** * \defgroup OpenGLSUpport_Module OpenGL Support module * * This module provides wrapper functions for a couple of OpenGL functions * which simplify the way to pass Eigen's object to openGL. * Here is an exmaple: * * \code * // You need to add path_to_eigen/unsupported to your include path. * #include * // ... * Vector3f x, y; * Matrix3f rot; * * glVertex(y + x * rot); * * Quaternion q; * glRotate(q); * * // ... 
* \endcode * */ //@{ #define EIGEN_GL_FUNC_DECLARATION(FUNC) \ namespace internal { \ template< typename XprType, \ typename Scalar = typename XprType::Scalar, \ int Rows = XprType::RowsAtCompileTime, \ int Cols = XprType::ColsAtCompileTime, \ bool IsGLCompatible = bool(XprType::Flags&LinearAccessBit) \ && bool(XprType::Flags&DirectAccessBit) \ && (XprType::IsVectorAtCompileTime || (XprType::Flags&RowMajorBit)==0)> \ struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl); \ \ template \ struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl) { \ inline static void run(const XprType& p) { \ EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl)::type>::run(p); } \ }; \ } \ \ template inline void FUNC(const Eigen::DenseBase& p) { \ EIGEN_CAT(EIGEN_CAT(internal::gl_,FUNC),_impl)::run(p.derived()); \ } #define EIGEN_GL_FUNC_SPECIALIZATION_MAT(FUNC,SCALAR,ROWS,COLS,SUFFIX) \ namespace internal { \ template< typename XprType> struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl) { \ inline static void run(const XprType& p) { FUNC##SUFFIX(p.data()); } \ }; \ } #define EIGEN_GL_FUNC_SPECIALIZATION_VEC(FUNC,SCALAR,SIZE,SUFFIX) \ namespace internal { \ template< typename XprType> struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl) { \ inline static void run(const XprType& p) { FUNC##SUFFIX(p.data()); } \ }; \ template< typename XprType> struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl) { \ inline static void run(const XprType& p) { FUNC##SUFFIX(p.data()); } \ }; \ } EIGEN_GL_FUNC_DECLARATION (glVertex) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,int, 2,2iv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,short, 2,2sv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,float, 2,2fv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,double, 2,2dv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,int, 3,3iv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,short, 3,3sv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,float, 3,3fv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,double, 3,3dv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,int, 4,4iv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,short, 4,4sv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,float, 4,4fv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,double, 4,4dv) EIGEN_GL_FUNC_DECLARATION (glTexCoord) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,int, 2,2iv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,short, 2,2sv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,float, 2,2fv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,double, 2,2dv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,int, 3,3iv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,short, 3,3sv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,float, 3,3fv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,double, 3,3dv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,int, 4,4iv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,short, 4,4sv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,float, 4,4fv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,double, 4,4dv) EIGEN_GL_FUNC_DECLARATION (glColor) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,int, 2,2iv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,short, 2,2sv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,float, 2,2fv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,double, 2,2dv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,int, 3,3iv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,short, 3,3sv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,float, 3,3fv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,double, 3,3dv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,int, 4,4iv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,short, 4,4sv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,float, 4,4fv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,double, 4,4dv) 
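// Note: each EIGEN_GL_FUNC_SPECIALIZATION_VEC(FUNC, SCALAR, SIZE, SUFFIX) invocation above
// maps a fixed-size Eigen vector with the given scalar type and size to the corresponding
// C-style OpenGL entry point FUNC##SUFFIX, called on the vector's data pointer (for example,
// a Vector3f passed to glVertex() dispatches to glVertex3fv(p.data())). The declarations
// below follow the same pattern for normals, scaling, translation, and matrix loading and
// multiplication.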
EIGEN_GL_FUNC_DECLARATION (glNormal) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glNormal,int, 3,3iv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glNormal,short, 3,3sv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glNormal,float, 3,3fv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glNormal,double, 3,3dv) inline void glScale2fv(const float* v) { glScalef(v[0], v[1], 1.f); } inline void glScale2dv(const double* v) { glScaled(v[0], v[1], 1.0); } inline void glScale3fv(const float* v) { glScalef(v[0], v[1], v[2]); } inline void glScale3dv(const double* v) { glScaled(v[0], v[1], v[2]); } EIGEN_GL_FUNC_DECLARATION (glScale) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glScale,float, 2,2fv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glScale,double, 2,2dv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glScale,float, 3,3fv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glScale,double, 3,3dv) template void glScale(const UniformScaling& s) { glScale(Matrix::Constant(s.factor())); } inline void glTranslate2fv(const float* v) { glTranslatef(v[0], v[1], 0.f); } inline void glTranslate2dv(const double* v) { glTranslated(v[0], v[1], 0.0); } inline void glTranslate3fv(const float* v) { glTranslatef(v[0], v[1], v[2]); } inline void glTranslate3dv(const double* v) { glTranslated(v[0], v[1], v[2]); } EIGEN_GL_FUNC_DECLARATION (glTranslate) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTranslate,float, 2,2fv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTranslate,double, 2,2dv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTranslate,float, 3,3fv) EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTranslate,double, 3,3dv) template void glTranslate(const Translation& t) { glTranslate(t.vector()); } template void glTranslate(const Translation& t) { glTranslate(t.vector()); } EIGEN_GL_FUNC_DECLARATION (glMultMatrix) EIGEN_GL_FUNC_SPECIALIZATION_MAT(glMultMatrix,float, 4,4,f) EIGEN_GL_FUNC_SPECIALIZATION_MAT(glMultMatrix,double, 4,4,d) template void glMultMatrix(const Transform& t) { glMultMatrix(t.matrix()); } template void glMultMatrix(const Transform& t) { glMultMatrix(t.matrix()); } template void glMultMatrix(const Transform& t) { glMultMatrix(Transform(t).matrix()); } EIGEN_GL_FUNC_DECLARATION (glLoadMatrix) EIGEN_GL_FUNC_SPECIALIZATION_MAT(glLoadMatrix,float, 4,4,f) EIGEN_GL_FUNC_SPECIALIZATION_MAT(glLoadMatrix,double, 4,4,d) template void glLoadMatrix(const Transform& t) { glLoadMatrix(t.matrix()); } template void glLoadMatrix(const Transform& t) { glLoadMatrix(t.matrix()); } template void glLoadMatrix(const Transform& t) { glLoadMatrix(Transform(t).matrix()); } inline void glRotate(const Rotation2D& rot) { glRotatef(rot.angle()*180.f/float(M_PI), 0.f, 0.f, 1.f); } inline void glRotate(const Rotation2D& rot) { glRotated(rot.angle()*180.0/M_PI, 0.0, 0.0, 1.0); } template void glRotate(const RotationBase& rot) { Transform tr(rot); glMultMatrix(tr.matrix()); } #define EIGEN_GL_MAKE_CONST_const const #define EIGEN_GL_MAKE_CONST__ #define EIGEN_GL_EVAL(X) X #define EIGEN_GL_FUNC1_DECLARATION(FUNC,ARG1,CONST) \ namespace internal { \ template< typename XprType, \ typename Scalar = typename XprType::Scalar, \ int Rows = XprType::RowsAtCompileTime, \ int Cols = XprType::ColsAtCompileTime, \ bool IsGLCompatible = bool(XprType::Flags&LinearAccessBit) \ && bool(XprType::Flags&DirectAccessBit) \ && (XprType::IsVectorAtCompileTime || (XprType::Flags&RowMajorBit)==0)> \ struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl); \ \ template \ struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl) { \ inline static void run(ARG1 a,EIGEN_GL_EVAL(EIGEN_GL_MAKE_CONST_##CONST) XprType& p) { \ EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl)::type>::run(a,p); } \ }; \ } \ \ template inline void 
FUNC(ARG1 a,EIGEN_GL_EVAL(EIGEN_GL_MAKE_CONST_##CONST) Eigen::DenseBase& p) { \ EIGEN_CAT(EIGEN_CAT(internal::gl_,FUNC),_impl)::run(a,p.derived()); \ } #define EIGEN_GL_FUNC1_SPECIALIZATION_MAT(FUNC,ARG1,CONST,SCALAR,ROWS,COLS,SUFFIX) \ namespace internal { \ template< typename XprType> struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl) { \ inline static void run(ARG1 a, EIGEN_GL_EVAL(EIGEN_GL_MAKE_CONST_##CONST) XprType& p) { FUNC##SUFFIX(a,p.data()); } \ }; \ } #define EIGEN_GL_FUNC1_SPECIALIZATION_VEC(FUNC,ARG1,CONST,SCALAR,SIZE,SUFFIX) \ namespace internal { \ template< typename XprType> struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl) { \ inline static void run(ARG1 a, EIGEN_GL_EVAL(EIGEN_GL_MAKE_CONST_##CONST) XprType& p) { FUNC##SUFFIX(a,p.data()); } \ }; \ template< typename XprType> struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl) { \ inline static void run(ARG1 a, EIGEN_GL_EVAL(EIGEN_GL_MAKE_CONST_##CONST) XprType& p) { FUNC##SUFFIX(a,p.data()); } \ }; \ } EIGEN_GL_FUNC1_DECLARATION (glGet,GLenum,_) EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glGet,GLenum,_,float, 4,4,Floatv) EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glGet,GLenum,_,double, 4,4,Doublev) // glUniform API #ifdef GL_VERSION_2_0 inline void glUniform2fv_ei (GLint loc, const float* v) { glUniform2fv(loc,1,v); } inline void glUniform2iv_ei (GLint loc, const int* v) { glUniform2iv(loc,1,v); } inline void glUniform3fv_ei (GLint loc, const float* v) { glUniform3fv(loc,1,v); } inline void glUniform3iv_ei (GLint loc, const int* v) { glUniform3iv(loc,1,v); } inline void glUniform4fv_ei (GLint loc, const float* v) { glUniform4fv(loc,1,v); } inline void glUniform4iv_ei (GLint loc, const int* v) { glUniform4iv(loc,1,v); } inline void glUniformMatrix2fv_ei (GLint loc, const float* v) { glUniformMatrix2fv(loc,1,false,v); } inline void glUniformMatrix3fv_ei (GLint loc, const float* v) { glUniformMatrix3fv(loc,1,false,v); } inline void glUniformMatrix4fv_ei (GLint loc, const float* v) { glUniformMatrix4fv(loc,1,false,v); } EIGEN_GL_FUNC1_DECLARATION (glUniform,GLint,const) EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,float, 2,2fv_ei) EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,int, 2,2iv_ei) EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,float, 3,3fv_ei) EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,int, 3,3iv_ei) EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,float, 4,4fv_ei) EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,int, 4,4iv_ei) EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 2,2,Matrix2fv_ei) EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 3,3,Matrix3fv_ei) EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 4,4,Matrix4fv_ei) #endif #ifdef GL_VERSION_2_1 static void glUniformMatrix2x3fv_ei(GLint loc, const float* v) { glUniformMatrix2x3fv(loc,1,false,v); } static void glUniformMatrix3x2fv_ei(GLint loc, const float* v) { glUniformMatrix3x2fv(loc,1,false,v); } static void glUniformMatrix2x4fv_ei(GLint loc, const float* v) { glUniformMatrix2x4fv(loc,1,false,v); } static void glUniformMatrix4x2fv_ei(GLint loc, const float* v) { glUniformMatrix4x2fv(loc,1,false,v); } static void glUniformMatrix3x4fv_ei(GLint loc, const float* v) { glUniformMatrix3x4fv(loc,1,false,v); } static void glUniformMatrix4x3fv_ei(GLint loc, const float* v) { glUniformMatrix4x3fv(loc,1,false,v); } EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 2,3,Matrix2x3fv_ei) EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 3,2,Matrix3x2fv_ei) 
EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 2,4,Matrix2x4fv_ei) EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 4,2,Matrix4x2fv_ei) EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 3,4,Matrix3x4fv_ei) EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 4,3,Matrix4x3fv_ei) #endif #ifdef GL_VERSION_3_0 inline void glUniform2uiv_ei (GLint loc, const unsigned int* v) { glUniform2uiv(loc,1,v); } inline void glUniform3uiv_ei (GLint loc, const unsigned int* v) { glUniform3uiv(loc,1,v); } inline void glUniform4uiv_ei (GLint loc, const unsigned int* v) { glUniform4uiv(loc,1,v); } EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,unsigned int, 2,2uiv_ei) EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,unsigned int, 3,3uiv_ei) EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,unsigned int, 4,4uiv_ei) #endif #ifdef GL_ARB_gpu_shader_fp64 inline void glUniform2dv_ei (GLint loc, const double* v) { glUniform2dv(loc,1,v); } inline void glUniform3dv_ei (GLint loc, const double* v) { glUniform3dv(loc,1,v); } inline void glUniform4dv_ei (GLint loc, const double* v) { glUniform4dv(loc,1,v); } EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,double, 2,2dv_ei) EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,double, 3,3dv_ei) EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,double, 4,4dv_ei) #endif //@} } #endif // EIGEN_OPENGL_MODULE RcppEigen/inst/include/unsupported/Eigen/AlignedVector30000644000176200001440000001207312446006401022701 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ALIGNED_VECTOR3 #define EIGEN_ALIGNED_VECTOR3 #include namespace Eigen { /** * \defgroup AlignedVector3_Module Aligned vector3 module * * \code * #include * \endcode */ //@{ /** \class AlignedVector3 * * \brief A vectorization friendly 3D vector * * This class represents a 3D vector internally using a 4D vector * such that vectorization can be seamlessly enabled. Of course, * the same result can be achieved by directly using a 4D vector. * This class makes this process simpler. 
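*
* A minimal usage sketch (the values are illustrative):
* \code
* #include <unsupported/Eigen/AlignedVector3>
*
* Eigen::AlignedVector3<float> a(1.f, 2.f, 3.f), b(4.f, 5.f, 6.f);
* float d = a.dot(b);                           // behaves like a Vector3f dot product
* Eigen::AlignedVector3<float> c = a.cross(b);  // cross product, padded to 4 floats internally
* c.normalize();
* \endcode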
* */ // TODO specialize Cwise template class AlignedVector3; namespace internal { template struct traits > : traits > { }; } template class AlignedVector3 : public MatrixBase > { typedef Matrix<_Scalar,4,1> CoeffType; CoeffType m_coeffs; public: typedef MatrixBase > Base; EIGEN_DENSE_PUBLIC_INTERFACE(AlignedVector3) using Base::operator*; inline Index rows() const { return 3; } inline Index cols() const { return 1; } inline const Scalar& coeff(Index row, Index col) const { return m_coeffs.coeff(row, col); } inline Scalar& coeffRef(Index row, Index col) { return m_coeffs.coeffRef(row, col); } inline const Scalar& coeff(Index index) const { return m_coeffs.coeff(index); } inline Scalar& coeffRef(Index index) { return m_coeffs.coeffRef(index);} inline AlignedVector3(const Scalar& x, const Scalar& y, const Scalar& z) : m_coeffs(x, y, z, Scalar(0)) {} inline AlignedVector3(const AlignedVector3& other) : Base(), m_coeffs(other.m_coeffs) {} template struct generic_assign_selector {}; template struct generic_assign_selector { inline static void run(AlignedVector3& dest, const XprType& src) { dest.m_coeffs = src; } }; template struct generic_assign_selector { inline static void run(AlignedVector3& dest, const XprType& src) { dest.m_coeffs.template head<3>() = src; dest.m_coeffs.w() = Scalar(0); } }; template inline explicit AlignedVector3(const MatrixBase& other) { generic_assign_selector::run(*this,other.derived()); } inline AlignedVector3& operator=(const AlignedVector3& other) { m_coeffs = other.m_coeffs; return *this; } inline AlignedVector3 operator+(const AlignedVector3& other) const { return AlignedVector3(m_coeffs + other.m_coeffs); } inline AlignedVector3& operator+=(const AlignedVector3& other) { m_coeffs += other.m_coeffs; return *this; } inline AlignedVector3 operator-(const AlignedVector3& other) const { return AlignedVector3(m_coeffs - other.m_coeffs); } inline AlignedVector3 operator-=(const AlignedVector3& other) { m_coeffs -= other.m_coeffs; return *this; } inline AlignedVector3 operator*(const Scalar& s) const { return AlignedVector3(m_coeffs * s); } inline friend AlignedVector3 operator*(const Scalar& s,const AlignedVector3& vec) { return AlignedVector3(s * vec.m_coeffs); } inline AlignedVector3& operator*=(const Scalar& s) { m_coeffs *= s; return *this; } inline AlignedVector3 operator/(const Scalar& s) const { return AlignedVector3(m_coeffs / s); } inline AlignedVector3& operator/=(const Scalar& s) { m_coeffs /= s; return *this; } inline Scalar dot(const AlignedVector3& other) const { eigen_assert(m_coeffs.w()==Scalar(0)); eigen_assert(other.m_coeffs.w()==Scalar(0)); return m_coeffs.dot(other.m_coeffs); } inline void normalize() { m_coeffs /= norm(); } inline AlignedVector3 normalized() { return AlignedVector3(m_coeffs / norm()); } inline Scalar sum() const { eigen_assert(m_coeffs.w()==Scalar(0)); return m_coeffs.sum(); } inline Scalar squaredNorm() const { eigen_assert(m_coeffs.w()==Scalar(0)); return m_coeffs.squaredNorm(); } inline Scalar norm() const { using std::sqrt; return sqrt(squaredNorm()); } inline AlignedVector3 cross(const AlignedVector3& other) const { return AlignedVector3(m_coeffs.cross3(other.m_coeffs)); } template inline bool isApprox(const MatrixBase& other, RealScalar eps=NumTraits::dummy_precision()) const { return m_coeffs.template head<3>().isApprox(other,eps); } }; //@} } #endif // EIGEN_ALIGNED_VECTOR3 RcppEigen/inst/include/unsupported/Eigen/SVD0000644000176200001440000000175512253717461020544 0ustar liggesusers#ifndef EIGEN_SVD_MODULE_H #define 
EIGEN_SVD_MODULE_H #include #include #include #include "../../Eigen/src/Core/util/DisableStupidWarnings.h" /** \defgroup SVD_Module SVD module * * * * This module provides SVD decomposition for matrices (both real and complex). * This decomposition is accessible via the following MatrixBase method: * - MatrixBase::jacobiSvd() * * \code * #include * \endcode */ #include "../../Eigen/src/misc/Solve.h" #include "../../Eigen/src/SVD/UpperBidiagonalization.h" #include "src/SVD/SVDBase.h" #include "src/SVD/JacobiSVD.h" #include "src/SVD/BDCSVD.h" #if defined(EIGEN_USE_LAPACKE) && !defined(EIGEN_USE_LAPACKE_STRICT) #include "../../Eigen/src/SVD/JacobiSVD_MKL.h" #endif #ifdef EIGEN2_SUPPORT #include "../../Eigen/src/Eigen2Support/SVD.h" #endif #include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_SVD_MODULE_H /* vim: set filetype=cpp et sw=2 ts=2 ai: */ RcppEigen/inst/include/unsupported/Eigen/BVH0000644000176200001440000001260112253717461020517 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Ilya Baran // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_BVH_MODULE_H #define EIGEN_BVH_MODULE_H #include #include #include #include #include namespace Eigen { /** * \defgroup BVH_Module BVH module * \brief This module provides generic bounding volume hierarchy algorithms * and reference tree implementations. * * * \code * #include * \endcode * * A bounding volume hierarchy (BVH) can accelerate many geometric queries. This module provides a generic implementation * of the two basic algorithms over a BVH: intersection of a query object against all objects in the hierarchy and minimization * of a function over the objects in the hierarchy. It also provides intersection and minimization over a cartesian product of * two BVH's. A BVH accelerates intersection by using the fact that if a query object does not intersect a volume, then it cannot * intersect any object contained in that volume. Similarly, a BVH accelerates minimization because the minimum of a function * over a volume is no greater than the minimum of a function over any object contained in it. 
* * Some sample queries that can be written in terms of intersection are: * - Determine all points where a ray intersects a triangle mesh * - Given a set of points, determine which are contained in a query sphere * - Given a set of spheres, determine which contain the query point * - Given a set of disks, determine if any is completely contained in a query rectangle (represent each 2D disk as a point \f$(x,y,r)\f$ * in 3D and represent the rectangle as a pyramid based on the original rectangle and shrinking in the \f$r\f$ direction) * - Given a set of points, count how many pairs are \f$d\pm\epsilon\f$ apart (done by looking at the cartesian product of the set * of points with itself) * * Some sample queries that can be written in terms of function minimization over a set of objects are: * - Find the intersection between a ray and a triangle mesh closest to the ray origin (function is infinite off the ray) * - Given a polyline and a query point, determine the closest point on the polyline to the query * - Find the diameter of a point cloud (done by looking at the cartesian product and using negative distance as the function) * - Determine how far two meshes are from colliding (this is also a cartesian product query) * * This implementation decouples the basic algorithms both from the type of hierarchy (and the types of the bounding volumes) and * from the particulars of the query. To enable abstraction from the BVH, the BVH is required to implement a generic mechanism * for traversal. To abstract from the query, the query is responsible for keeping track of results. * * To be used in the algorithms, a hierarchy must implement the following traversal mechanism (see KdBVH for a sample implementation): \code typedef Volume //the type of bounding volume typedef Object //the type of object in the hierarchy typedef Index //a reference to a node in the hierarchy--typically an int or a pointer typedef VolumeIterator //an iterator type over node children--returns Index typedef ObjectIterator //an iterator over object (leaf) children--returns const Object & Index getRootIndex() const //returns the index of the hierarchy root const Volume &getVolume(Index index) const //returns the bounding volume of the node at given index void getChildren(Index index, VolumeIterator &outVBegin, VolumeIterator &outVEnd, ObjectIterator &outOBegin, ObjectIterator &outOEnd) const //getChildren takes a node index and makes [outVBegin, outVEnd) range over its node children //and [outOBegin, outOEnd) range over its object children \endcode * * To use the hierarchy, call BVIntersect or BVMinimize, passing it a BVH (or two, for cartesian product) and a minimizer or intersector. * For an intersection query on a single BVH, the intersector encapsulates the query and must provide two functions: * \code bool intersectVolume(const Volume &volume) //returns true if the query intersects the volume bool intersectObject(const Object &object) //returns true if the intersection search should terminate immediately \endcode * The guarantee that BVIntersect provides is that intersectObject will be called on every object whose bounding volume * intersects the query (but possibly on other objects too) unless the search is terminated prematurely. It is the * responsibility of the intersectObject function to keep track of the results in whatever manner is appropriate. * The cartesian product intersection and the BVMinimize queries are similar--see their individual documentation. 
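*
* As a hedged sketch of the intersection side (the point set, sphere center and radius are
* illustrative, and the \c bounding_box overload is the user-supplied hook that KdBVH looks up
* to bound each stored object):
\code
#include <iostream>
#include <vector>
#include <unsupported/Eigen/BVH>

namespace Eigen {
  // Found by KdBVH via argument-dependent lookup; a point's bounding box is the point itself.
  AlignedBox3d bounding_box(const Vector3d &v) { return AlignedBox3d(v, v); }
}

// Collects every point that lies inside a query sphere, using the
// intersectVolume / intersectObject interface described above.
struct SphereIntersector
{
  Eigen::Vector3d center;
  double radius;
  std::vector<Eigen::Vector3d> hits;

  bool intersectVolume(const Eigen::AlignedBox3d &box)
  { return box.squaredExteriorDistance(center) <= radius * radius; }

  bool intersectObject(const Eigen::Vector3d &p)
  {
    if ((p - center).squaredNorm() <= radius * radius) hits.push_back(p);
    return false;                                // never terminate the search early
  }
};

int main()
{
  std::vector<Eigen::Vector3d> points;
  for (int i = 0; i < 1000; ++i) points.push_back(Eigen::Vector3d::Random());

  Eigen::KdBVH<double, 3, Eigen::Vector3d> tree(points.begin(), points.end());

  SphereIntersector query;
  query.center = Eigen::Vector3d::Zero();
  query.radius = 0.5;
  Eigen::BVIntersect(tree, query);

  std::cout << query.hits.size() << " points inside the query sphere\n";
}
\endcode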
* * The following is a simple but complete example for how to use the BVH to accelerate the search for a closest red-blue point pair: * \include BVH_Example.cpp * Output: \verbinclude BVH_Example.out */ } //@{ #include "src/BVH/BVAlgorithms.h" #include "src/BVH/KdBVH.h" //@} #endif // EIGEN_BVH_MODULE_H RcppEigen/inst/include/unsupported/Eigen/MoreVectorization0000644000176200001440000000111212253717461023556 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MOREVECTORIZATION_MODULE_H #define EIGEN_MOREVECTORIZATION_MODULE_H #include namespace Eigen { /** * \defgroup MoreVectorization More vectorization module */ } #include "src/MoreVectorization/MathFunctions.h" #endif // EIGEN_MOREVECTORIZATION_MODULE_H RcppEigen/inst/include/RcppEigenForward.h0000644000176200001440000000575412375174155020125 0ustar liggesusers// -*- mode: C++; c-indent-level: 4; c-basic-offset: 4; tab-width: 8 -*- // // RcppEigenForward.h: Rcpp/Eigen glue // // Copyright (C) 2011 Douglas Bates, Dirk Eddelbuettel and Romain Francois // // This file is part of RcppEigen. // // RcppEigen is free software: you can redistribute it and/or modify it // under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 2 of the License, or // (at your option) any later version. // // RcppEigen is distributed in the hope that it will be useful, but // WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with RcppEigen. If not, see . #ifndef RcppEigen__RcppEigenForward__h #define RcppEigen__RcppEigenForward__h #include #include #include #include #include #include #include #include //#include // causes problems redefining sign #include #include #include #include #include #include #include // also includes Eigen/Sparse #include /* forward declarations */ namespace Rcpp { /* support for wrap */ template SEXP wrap(const Eigen::CholmodDecomposition >& obj); namespace traits { /* support for as */ template class Exporter< Eigen::Map > >; template class Exporter< Eigen::Map > >; template class Exporter< Eigen::Map > >; template class Exporter< Eigen::Map > >; template class Exporter< Eigen::Matrix >; template class Exporter< Eigen::Matrix >; template class Exporter< Eigen::Matrix >; template class Exporter< Eigen::Array >; template class Exporter< Eigen::Array >; template class Exporter< Eigen::Array >; template class Exporter< Eigen::MappedSparseMatrix >; template class Exporter< Eigen::SparseMatrix >; template class Exporter< Eigen::MappedSparseMatrix >; template class Exporter< Eigen::SparseMatrix >; } // namespace traits } #endif RcppEigen/inst/include/RcppEigenWrap.h0000644000176200001440000004216712550462273017425 0ustar liggesusers// -*- mode: C++; c-indent-level: 4; c-basic-offset: 4; tab-width: 4 -*- // // RcppEigenWrap.h: Rcpp wrap methods for Eigen matrices, vectors and arrays // // Copyright (C) 2011 - 2012 Douglas Bates, Dirk Eddelbuettel and Romain Francois // // This file is part of RcppEigen. 
// // RcppEigen is free software: you can redistribute it and/or modify it // under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 2 of the License, or // (at your option) any later version. // // RcppEigen is distributed in the hope that it will be useful, but // WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with RcppEigen. If not, see . #ifndef RcppEigen__RcppEigenWrap__h #define RcppEigen__RcppEigenWrap__h namespace Rcpp{ namespace RcppEigen{ template SEXP Eigen_cholmod_wrap(const Eigen::CholmodDecomposition >& obj) { typedef T* Tpt; const cholmod_factor* f = obj.factor(); if (f->minor < f->n) throw std::runtime_error("CHOLMOD factorization was unsuccessful"); //FIXME: Should extend this selection according to T S4 ans(std::string(f->is_super ? "dCHMsuper" : "dCHMsimpl")); IntegerVector dd(2); dd[0] = dd[1] = f->n; ans.slot("Dim") = dd; ans.slot("perm") = ::Rcpp::wrap((int*)f->Perm, (int*)f->Perm + f->n); ans.slot("colcount") = ::Rcpp::wrap((int*)f->ColCount, (int*)f->ColCount + f->n); IntegerVector tt(f->is_super ? 6 : 4); tt[0] = f->ordering; tt[1] = f->is_ll; tt[2] = f->is_super; tt[3] = f->is_monotonic; ans.slot("type") = tt; if (f->is_super) { tt[4] = f->maxcsize; tt[5] = f->maxesize; ans.slot("super") = ::Rcpp::wrap((int*)f->super, ((int*)f->super) + f->nsuper + 1); ans.slot("pi") = ::Rcpp::wrap((int*)f->pi, ((int*)f->pi) + f->nsuper + 1); ans.slot("px") = ::Rcpp::wrap((int*)f->px, ((int*)f->px) + f->nsuper + 1); ans.slot("s") = ::Rcpp::wrap((int*)f->s, ((int*)f->s) + f->ssize); ans.slot("x") = ::Rcpp::wrap((Tpt)f->x, ((T*)f->x) + f->xsize); } else { ans.slot("i") = ::Rcpp::wrap((int*)f->i, ((int*)f->i) + f->nzmax); ans.slot("p") = ::Rcpp::wrap((int*)f->p, ((int*)f->p) + f->n + 1); ans.slot("x") = ::Rcpp::wrap((Tpt)f->x, ((T*)f->x) + f->nzmax); ans.slot("nz") = ::Rcpp::wrap((int*)f->nz, ((int*)f->nz) + f->n); ans.slot("nxt") = ::Rcpp::wrap((int*)f->next, ((int*)f->next) + f->n + 2); ans.slot("prv") = ::Rcpp::wrap((int*)f->prev, ((int*)f->prev) + f->n + 2); } return ::Rcpp::wrap(ans); } } /* namespace RcppEigen */ template SEXP wrap(const Eigen::CholmodDecomposition >& obj) { return RcppEigen::Eigen_cholmod_wrap(obj); } namespace RcppEigen{ // helper trait to identify if T is a plain object type // TODO: perhaps move this to its own file template struct is_plain : Rcpp::traits::same_type{} ; // helper trait to identify if the object has dense storage template struct is_dense : Rcpp::traits::same_type{} ; // for plain dense objects template SEXP eigen_wrap_plain_dense( const T& obj, Rcpp::traits::true_type ){ typename Eigen::internal::conditional, const T&>::type objCopy(obj); int m = obj.rows(), n = obj.cols(); SEXP ans = PROTECT(::Rcpp::wrap(objCopy.data(), objCopy.data() + m * n)); if( T::ColsAtCompileTime != 1 ) { SEXP dd = PROTECT(::Rf_allocVector(INTSXP, 2)); int *d = INTEGER(dd); d[0] = m; d[1] = n; ::Rf_setAttrib(ans, R_DimSymbol, dd); UNPROTECT(1); } UNPROTECT(1); return ans; } // for plain sparse objects template SEXP eigen_wrap_plain_dense( const T& object, Rcpp::traits::false_type ){ typedef typename T::Scalar Scalar; const int RTYPE = Rcpp::traits::r_sexptype_traits::rtype; std::string klass; switch(RTYPE) { case REALSXP: klass = T::IsRowMajor ? 
"dgRMatrix" : "dgCMatrix"; break; // case INTSXP: klass = T::IsRowMajor ? "igRMatrix" : "igCMatrix"; // classes not exported // break; default: throw std::invalid_argument("RTYPE not matched in conversion to sparse matrix"); } S4 ans(klass); const int nnz = object.nonZeros(); ans.slot("Dim") = Dimension(object.rows(), object.cols()); ans.slot(T::IsRowMajor ? "j" : "i") = IntegerVector(object.innerIndexPtr(), object.innerIndexPtr() + nnz); ans.slot("p") = IntegerVector(object.outerIndexPtr(), object.outerIndexPtr() + object.outerSize() + 1); ans.slot("x") = Vector(object.valuePtr(), object.valuePtr() + nnz); return ans; } // plain object, so we can assume data() and size() template inline SEXP eigen_wrap_is_plain( const T& obj, ::Rcpp::traits::true_type ){ return eigen_wrap_plain_dense( obj, typename is_dense::type() ) ; } // when the object is not plain, we need to eval()uate it template inline SEXP eigen_wrap_is_plain( const T& obj, ::Rcpp::traits::false_type ){ return eigen_wrap_is_plain( obj.eval(), Rcpp::traits::true_type() ) ; } // at that point we know that T derives from EigenBase // so it is either a plain object (Matrix, etc ...) or an expression // that eval()uates into a plain object // // so the first thing we need to do is to find out so that we don't evaluate if we don't need to template inline SEXP eigen_wrap( const T& obj ){ return eigen_wrap_is_plain( obj, typename is_plain::type() ) ; } } /* namespace RcppEigen */ namespace traits { /* support for Rcpp::as */ template class Eigen_Matrix_Exporter { public: Eigen_Matrix_Exporter(SEXP x) : vec(x), d_ncol(1), d_nrow(Rf_length(x)) { if (TYPEOF(x) != RTYPE) throw std::invalid_argument("Wrong R type for mapped vector"); if (::Rf_isMatrix(x)) { int *dims = vec.dims() ; d_nrow = dims[0]; d_ncol = dims[1]; } } T get() {return T(vec.begin(), d_nrow, d_ncol );} private: Rcpp::Vector vec ; int d_nrow, d_ncol ; } ; // modified from "class MatrixExporter" in /include/Rcpp/internal/Exporter.h // MatrixExporter uses brackets [] to index the destination matrix, // which is not supported by MatrixXd. 
// Here we copy data to MatrixXd.data() rather than MatrixXd template class MatrixExporterForEigen { public: typedef value_type r_export_type; MatrixExporterForEigen(SEXP x) : object(x){} ~MatrixExporterForEigen(){} T get() { Shield dims( ::Rf_getAttrib( object, R_DimSymbol ) ); if( Rf_isNull(dims) || ::Rf_length(dims) != 2 ){ throw ::Rcpp::not_a_matrix(); } int* dims_ = INTEGER(dims); T result( dims_[0], dims_[1] ); value_type *data = result.data(); ::Rcpp::internal::export_indexing( object, data ); return result ; } private: SEXP object; }; template class Exporter > > { typedef typename Eigen::Map > OUT ; const static int RTYPE = ::Rcpp::traits::r_sexptype_traits::rtype ; Rcpp::Vector vec ; public: Exporter(SEXP x) : vec(x) { if (TYPEOF(x) != RTYPE) throw std::invalid_argument("Wrong R type for mapped vector"); } OUT get() {return OUT(vec.begin(), vec.size());} } ; template class Exporter< Eigen::Map > > { typedef typename Eigen::Map > OUT ; const static int RTYPE = ::Rcpp::traits::r_sexptype_traits::rtype ; Rcpp::Vector vec ; public: Exporter(SEXP x) : vec(x) { if (TYPEOF(x) != RTYPE) throw std::invalid_argument("Wrong R type for mapped 1D array"); } OUT get() {return OUT(vec.begin(), vec.size());} } ; template class Exporter > > { typedef typename Eigen::Map > OUT ; const static int RTYPE = ::Rcpp::traits::r_sexptype_traits::rtype ; Rcpp::Vector vec ; int d_ncol, d_nrow ; public: Exporter(SEXP x) : vec(x), d_ncol(1), d_nrow(Rf_length(x)) { if (TYPEOF(x) != RTYPE) throw std::invalid_argument("Wrong R type for mapped matrix"); if (::Rf_isMatrix(x)) { int *dims = INTEGER( ::Rf_getAttrib( x, R_DimSymbol ) ) ; d_nrow = dims[0]; d_ncol = dims[1]; } } OUT get() {return OUT(vec.begin(), d_nrow, d_ncol );} } ; template class Exporter > > { typedef typename Eigen::Map > OUT ; const static int RTYPE = ::Rcpp::traits::r_sexptype_traits::rtype ; Rcpp::Vector vec ; int d_ncol, d_nrow ; public: Exporter(SEXP x) : vec(x), d_ncol(1), d_nrow(Rf_length(x)) { if (TYPEOF(x) != RTYPE) throw std::invalid_argument("Wrong R type for mapped 2D array"); if (::Rf_isMatrix(x)) { int *dims = INTEGER( ::Rf_getAttrib( x, R_DimSymbol ) ) ; d_nrow = dims[0]; d_ncol = dims[1]; } } OUT get() {return OUT(vec.begin(), d_nrow, d_ncol );} } ; template class Exporter > : public IndexingExporter, T> { public: Exporter(SEXP x) : IndexingExporter, T >(x){} }; template class Exporter > : public IndexingExporter, T> { public: Exporter(SEXP x) : IndexingExporter, T >(x){} }; template class Exporter< Eigen::Matrix > : public IndexingExporter< Eigen::Matrix, T > { public: Exporter(SEXP x) : IndexingExporter< Eigen::Matrix, T >(x){} }; template class Exporter< Eigen::Array > : public IndexingExporter< Eigen::Array, T > { public: Exporter(SEXP x) : IndexingExporter< Eigen::Array, T >(x){} }; template class Exporter< Eigen::Matrix > : public MatrixExporterForEigen< Eigen::Matrix, T > { public: Exporter(SEXP x) : MatrixExporterForEigen< Eigen::Matrix, T >(x){} }; template class Exporter< Eigen::Array > : public MatrixExporterForEigen< Eigen::Array, T > { public: Exporter(SEXP x) : MatrixExporterForEigen< Eigen::Array, T >(x){} }; template class Exporter > { public: const static int RTYPE = ::Rcpp::traits::r_sexptype_traits::rtype ; Exporter(SEXP x) : d_x(x), d_dims(d_x.slot("Dim")), d_i(d_x.slot("i")), d_p(d_x.slot("p")), xx( d_x.slot("x") ) { if (!d_x.is("dgCMatrix")) throw std::invalid_argument("Need S4 class dgCMatrix for a mapped sparse matrix"); } Eigen::MappedSparseMatrix get() { return Eigen::MappedSparseMatrix(d_dims[0], 
d_dims[1], d_p[d_dims[1]], d_p.begin(), d_i.begin(), xx.begin() ); } protected: S4 d_x; IntegerVector d_dims, d_i, d_p; Vector xx ; }; template class Exporter > { public: const static int RTYPE = ::Rcpp::traits::r_sexptype_traits::rtype ; Exporter(SEXP x) : d_x(x), d_dims(d_x.slot("Dim")), d_j(d_x.slot("j")), d_p(d_x.slot("p")), xx( d_x.slot("x") ) { if (!d_x.is("dgRMatrix")) throw std::invalid_argument("Need S4 class dgRMatrix for a mapped sparse matrix"); } Eigen::MappedSparseMatrix get() { return Eigen::MappedSparseMatrix(d_dims[0], d_dims[1], d_p[d_dims[1]], d_p.begin(), d_j.begin(), xx.begin() ); } protected: S4 d_x; IntegerVector d_dims, d_j, d_p; Vector xx ; }; template class Exporter > { public: const static int RTYPE = ::Rcpp::traits::r_sexptype_traits::rtype ; Exporter(SEXP x) : d_x(x), d_dims(d_x.slot("Dim")), d_i(d_x.slot("i")), d_p(d_x.slot("p")), xx(d_x.slot("x")) { if (!d_x.is("dgCMatrix")) throw std::invalid_argument("Need S4 class dgCMatrix for a sparse matrix"); } Eigen::SparseMatrix get() { Eigen::SparseMatrix ans(d_dims[0], d_dims[1]); ans.reserve(d_p[d_dims[1]]); for(int j = 0; j < d_dims[1]; ++j) { ans.startVec(j); for (int k = d_p[j]; k < d_p[j + 1]; ++k) ans.insertBack(d_i[k], j) = xx[k]; } ans.finalize(); return ans; } protected: S4 d_x; IntegerVector d_dims, d_i, d_p; Vector xx ; }; template class Exporter > { public: const static int RTYPE = ::Rcpp::traits::r_sexptype_traits::rtype ; Exporter(SEXP x) : d_x(x), d_dims(d_x.slot("Dim")), d_j(d_x.slot("j")), d_p(d_x.slot("p")), xx(d_x.slot("x")) { if (!d_x.is("dgRMatrix")) throw std::invalid_argument("Need S4 class dgRMatrix for a sparse matrix"); } Eigen::SparseMatrix get() { Eigen::SparseMatrix ans(d_dims[0], d_dims[1]); ans.reserve(d_p[d_dims[0]]); for(int i = 0; i < d_dims[0]; ++i) { ans.startVec(i); for (int k = d_p[i]; k < d_p[i + 1]; ++k) ans.insertBack(i, d_j[k]) = xx[k]; } ans.finalize(); return ans; } protected: S4 d_x; IntegerVector d_dims, d_j, d_p; Vector xx ; }; } // namespace traits } #endif RcppEigen/inst/include/RcppEigen.h0000644000176200001440000000213512253717461016564 0ustar liggesusers// -*- mode: C++; c-indent-level: 4; c-basic-offset: 4; tab-width: 8 -*- // // RcppEigen.h: Rcpp/Eigen glue // // Copyright (C) 2011 Douglas Bates, Dirk Eddelbuettel and Romain Francois // // This file is part of RcppEigen. // // RcppEigen is free software: you can redistribute it and/or modify it // under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 2 of the License, or // (at your option) any later version. // // RcppEigen is distributed in the hope that it will be useful, but // WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with RcppEigen. If not, see . 
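// ---------------------------------------------------------------------------
// [Editor's sketch -- not part of the original sources.]  The sparse Exporter
// specialisations above map an R 'dgCMatrix' (column-major) or 'dgRMatrix'
// (row-major) onto Eigen sparse types.  A minimal usage sketch; the function
// name sparseCrossprod is hypothetical:
#include <RcppEigen.h>

RcppExport SEXP sparseCrossprod(SEXP AA) {
    // Zero-copy view of a dgCMatrix via Exporter<MappedSparseMatrix<double> >.
    const Eigen::MappedSparseMatrix<double> A(
        Rcpp::as<Eigen::MappedSparseMatrix<double> >(AA));
    const Eigen::SparseMatrix<double> At(A.adjoint());
    // The sparse wrap() path above converts the product back to a dgCMatrix.
    return Rcpp::wrap(Eigen::SparseMatrix<double>(At * A));
}
// ---------------------------------------------------------------------------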
#ifndef RcppEigen__RcppEigen__h #define RcppEigen__RcppEigen__h #include #include #include //#include //#include // also includes Eigen/Sparse #endif RcppEigen/inst/include/Eigen/0000755000176200001440000000000012647570720015567 5ustar liggesusersRcppEigen/inst/include/Eigen/SparseQR0000644000176200001440000000173712253717461017220 0ustar liggesusers#ifndef EIGEN_SPARSEQR_MODULE_H #define EIGEN_SPARSEQR_MODULE_H #include "SparseCore" #include "OrderingMethods" #include "src/Core/util/DisableStupidWarnings.h" /** \defgroup SparseQR_Module SparseQR module * \brief Provides QR decomposition for sparse matrices * * This module provides a simplicial version of the left-looking Sparse QR decomposition. * The columns of the input matrix should be reordered to limit the fill-in during the * decomposition. Built-in methods (COLAMD, AMD) or external methods (METIS) can be used to this end. * See the \link OrderingMethods_Module OrderingMethods\endlink module for the list * of built-in and external ordering methods. * * \code * #include * \endcode * * */ #include "src/misc/Solve.h" #include "src/misc/SparseSolve.h" #include "OrderingMethods" #include "src/SparseCore/SparseColEtree.h" #include "src/SparseQR/SparseQR.h" #include "src/Core/util/ReenableStupidWarnings.h" #endif RcppEigen/inst/include/Eigen/Eigenvalues0000644000176200001440000000256212253717461017764 0ustar liggesusers#ifndef EIGEN_EIGENVALUES_MODULE_H #define EIGEN_EIGENVALUES_MODULE_H #include "Core" #include "src/Core/util/DisableStupidWarnings.h" #include "Cholesky" #include "Jacobi" #include "Householder" #include "LU" #include "Geometry" /** \defgroup Eigenvalues_Module Eigenvalues module * * * * This module mainly provides various eigenvalue solvers. * This module also provides some MatrixBase methods, including: * - MatrixBase::eigenvalues(), * - MatrixBase::operatorNorm() * * \code * #include * \endcode */ #include "src/Eigenvalues/Tridiagonalization.h" #include "src/Eigenvalues/RealSchur.h" #include "src/Eigenvalues/EigenSolver.h" #include "src/Eigenvalues/SelfAdjointEigenSolver.h" #include "src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h" #include "src/Eigenvalues/HessenbergDecomposition.h" #include "src/Eigenvalues/ComplexSchur.h" #include "src/Eigenvalues/ComplexEigenSolver.h" #include "src/Eigenvalues/RealQZ.h" #include "src/Eigenvalues/GeneralizedEigenSolver.h" #include "src/Eigenvalues/MatrixBaseEigenvalues.h" #ifdef EIGEN_USE_LAPACKE #include "src/Eigenvalues/RealSchur_MKL.h" #include "src/Eigenvalues/ComplexSchur_MKL.h" #include "src/Eigenvalues/SelfAdjointEigenSolver_MKL.h" #endif #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_EIGENVALUES_MODULE_H /* vim: set filetype=cpp et sw=2 ts=2 ai: */ RcppEigen/inst/include/Eigen/StdVector0000644000176200001440000000136312253717461017430 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud // Copyright (C) 2009 Hauke Heibel // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_STDVECTOR_MODULE_H #define EIGEN_STDVECTOR_MODULE_H #include "Core" #include #if (defined(_MSC_VER) && defined(_WIN64)) /* MSVC auto aligns in 64 bit builds */ #define EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(...) 
#else #include "src/StlSupport/StdVector.h" #endif #endif // EIGEN_STDVECTOR_MODULE_H RcppEigen/inst/include/Eigen/src/0000755000176200001440000000000012375356050016352 5ustar liggesusersRcppEigen/inst/include/Eigen/src/SparseQR/0000755000176200001440000000000012446006401020041 5ustar liggesusersRcppEigen/inst/include/Eigen/src/SparseQR/SparseQR.h0000644000176200001440000006352112446006401021721 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012-2013 Desire Nuentsa // Copyright (C) 2012-2014 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSE_QR_H #define EIGEN_SPARSE_QR_H namespace Eigen { template class SparseQR; template struct SparseQRMatrixQReturnType; template struct SparseQRMatrixQTransposeReturnType; template struct SparseQR_QProduct; namespace internal { template struct traits > { typedef typename SparseQRType::MatrixType ReturnType; typedef typename ReturnType::Index Index; typedef typename ReturnType::StorageKind StorageKind; }; template struct traits > { typedef typename SparseQRType::MatrixType ReturnType; }; template struct traits > { typedef typename Derived::PlainObject ReturnType; }; } // End namespace internal /** * \ingroup SparseQR_Module * \class SparseQR * \brief Sparse left-looking rank-revealing QR factorization * * This class implements a left-looking rank-revealing QR decomposition * of sparse matrices. When a column has a norm less than a given tolerance * it is implicitly permuted to the end. The QR factorization thus obtained is * given by A*P = Q*R where R is upper triangular or trapezoidal. * * P is the column permutation which is the product of the fill-reducing and the * rank-revealing permutations. Use colsPermutation() to get it. * * Q is the orthogonal matrix represented as products of Householder reflectors. * Use matrixQ() to get an expression and matrixQ().transpose() to get the transpose. * You can then apply it to a vector. * * R is the sparse triangular or trapezoidal matrix. The later occurs when A is rank-deficient. * matrixR().topLeftCorner(rank(), rank()) always returns a triangular factor of full rank. * * \tparam _MatrixType The type of the sparse matrix A, must be a column-major SparseMatrix<> * \tparam _OrderingType The fill-reducing ordering method. See the \link OrderingMethods_Module * OrderingMethods \endlink module for the list of built-in and external ordering methods. * * \warning The input sparse matrix A must be in compressed mode (see SparseMatrix::makeCompressed()). * */ template class SparseQR { public: typedef _MatrixType MatrixType; typedef _OrderingType OrderingType; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::Index Index; typedef SparseMatrix QRMatrixType; typedef Matrix IndexVector; typedef Matrix ScalarVector; typedef PermutationMatrix PermutationType; public: SparseQR () : m_isInitialized(false), m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false),m_isEtreeOk(false) { } /** Construct a QR factorization of the matrix \a mat. * * \warning The matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()). 
* * \sa compute() */ SparseQR(const MatrixType& mat) : m_isInitialized(false), m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false),m_isEtreeOk(false) { compute(mat); } /** Computes the QR factorization of the sparse matrix \a mat. * * \warning The matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()). * * \sa analyzePattern(), factorize() */ void compute(const MatrixType& mat) { analyzePattern(mat); factorize(mat); } void analyzePattern(const MatrixType& mat); void factorize(const MatrixType& mat); /** \returns the number of rows of the represented matrix. */ inline Index rows() const { return m_pmat.rows(); } /** \returns the number of columns of the represented matrix. */ inline Index cols() const { return m_pmat.cols();} /** \returns a const reference to the \b sparse upper triangular matrix R of the QR factorization. */ const QRMatrixType& matrixR() const { return m_R; } /** \returns the number of non linearly dependent columns as determined by the pivoting threshold. * * \sa setPivotThreshold() */ Index rank() const { eigen_assert(m_isInitialized && "The factorization should be called first, use compute()"); return m_nonzeropivots; } /** \returns an expression of the matrix Q as products of sparse Householder reflectors. * The common usage of this function is to apply it to a dense matrix or vector * \code * VectorXd B1, B2; * // Initialize B1 * B2 = matrixQ() * B1; * \endcode * * To get a plain SparseMatrix representation of Q: * \code * SparseMatrix Q; * Q = SparseQR >(A).matrixQ(); * \endcode * Internally, this call simply performs a sparse product between the matrix Q * and a sparse identity matrix. However, due to the fact that the sparse * reflectors are stored unsorted, two transpositions are needed to sort * them before performing the product. */ SparseQRMatrixQReturnType matrixQ() const { return SparseQRMatrixQReturnType(*this); } /** \returns a const reference to the column permutation P that was applied to A such that A*P = Q*R * It is the combination of the fill-in reducing permutation and numerical column pivoting. */ const PermutationType& colsPermutation() const { eigen_assert(m_isInitialized && "Decomposition is not initialized."); return m_outputPerm_c; } /** \returns A string describing the type of error. * This method is provided to ease debugging, not to handle errors. */ std::string lastErrorMessage() const { return m_lastError; } /** \internal */ template bool _solve(const MatrixBase &B, MatrixBase &dest) const { eigen_assert(m_isInitialized && "The factorization should be called first, use compute()"); eigen_assert(this->rows() == B.rows() && "SparseQR::solve() : invalid number of rows in the right hand side matrix"); Index rank = this->rank(); // Compute Q^T * b; typename Dest::PlainObject y, b; y = this->matrixQ().transpose() * B; b = y; // Solve with the triangular matrix R y.resize((std::max)(cols(),Index(y.rows())),y.cols()); y.topRows(rank) = this->matrixR().topLeftCorner(rank, rank).template triangularView().solve(b.topRows(rank)); y.bottomRows(y.rows()-rank).setZero(); // Apply the column permutation if (m_perm_c.size()) dest = colsPermutation() * y.topRows(cols()); else dest = y.topRows(cols()); m_info = Success; return true; } /** Sets the threshold that is used to determine linearly dependent columns during the factorization. 
* * In practice, if during the factorization the norm of the column that has to be eliminated is below * this threshold, then the entire column is treated as zero, and it is moved at the end. */ void setPivotThreshold(const RealScalar& threshold) { m_useDefaultThreshold = false; m_threshold = threshold; } /** \returns the solution X of \f$ A X = B \f$ using the current decomposition of A. * * \sa compute() */ template inline const internal::solve_retval solve(const MatrixBase& B) const { eigen_assert(m_isInitialized && "The factorization should be called first, use compute()"); eigen_assert(this->rows() == B.rows() && "SparseQR::solve() : invalid number of rows in the right hand side matrix"); return internal::solve_retval(*this, B.derived()); } template inline const internal::sparse_solve_retval solve(const SparseMatrixBase& B) const { eigen_assert(m_isInitialized && "The factorization should be called first, use compute()"); eigen_assert(this->rows() == B.rows() && "SparseQR::solve() : invalid number of rows in the right hand side matrix"); return internal::sparse_solve_retval(*this, B.derived()); } /** \brief Reports whether previous computation was successful. * * \returns \c Success if computation was successful, * \c NumericalIssue if the QR factorization reports a numerical problem * \c InvalidInput if the input matrix is invalid * * \sa iparm() */ ComputationInfo info() const { eigen_assert(m_isInitialized && "Decomposition is not initialized."); return m_info; } protected: inline void sort_matrix_Q() { if(this->m_isQSorted) return; // The matrix Q is sorted during the transposition SparseMatrix mQrm(this->m_Q); this->m_Q = mQrm; this->m_isQSorted = true; } protected: bool m_isInitialized; bool m_analysisIsok; bool m_factorizationIsok; mutable ComputationInfo m_info; std::string m_lastError; QRMatrixType m_pmat; // Temporary matrix QRMatrixType m_R; // The triangular factor matrix QRMatrixType m_Q; // The orthogonal reflectors ScalarVector m_hcoeffs; // The Householder coefficients PermutationType m_perm_c; // Fill-reducing Column permutation PermutationType m_pivotperm; // The permutation for rank revealing PermutationType m_outputPerm_c; // The final column permutation RealScalar m_threshold; // Threshold to determine null Householder reflections bool m_useDefaultThreshold; // Use default threshold Index m_nonzeropivots; // Number of non zero pivots found IndexVector m_etree; // Column elimination tree IndexVector m_firstRowElt; // First element in each row bool m_isQSorted; // whether Q is sorted or not bool m_isEtreeOk; // whether the elimination tree match the initial input matrix template friend struct SparseQR_QProduct; template friend struct SparseQRMatrixQReturnType; }; /** \brief Preprocessing step of a QR factorization * * \warning The matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()). * * In this step, the fill-reducing permutation is computed and applied to the columns of A * and the column elimination tree is computed as well. Only the sparsity pattern of \a mat is exploited. * * \note In this step it is assumed that there is no empty row in the matrix \a mat. */ template void SparseQR::analyzePattern(const MatrixType& mat) { eigen_assert(mat.isCompressed() && "SparseQR requires a sparse matrix in compressed mode. 
Call .makeCompressed() before passing it to SparseQR"); // Copy to a column major matrix if the input is rowmajor typename internal::conditional::type matCpy(mat); // Compute the column fill reducing ordering OrderingType ord; ord(matCpy, m_perm_c); Index n = mat.cols(); Index m = mat.rows(); Index diagSize = (std::min)(m,n); if (!m_perm_c.size()) { m_perm_c.resize(n); m_perm_c.indices().setLinSpaced(n, 0,n-1); } // Compute the column elimination tree of the permuted matrix m_outputPerm_c = m_perm_c.inverse(); internal::coletree(matCpy, m_etree, m_firstRowElt, m_outputPerm_c.indices().data()); m_isEtreeOk = true; m_R.resize(m, n); m_Q.resize(m, diagSize); // Allocate space for nonzero elements : rough estimation m_R.reserve(2*mat.nonZeros()); //FIXME Get a more accurate estimation through symbolic factorization with the etree m_Q.reserve(2*mat.nonZeros()); m_hcoeffs.resize(diagSize); m_analysisIsok = true; } /** \brief Performs the numerical QR factorization of the input matrix * * The function SparseQR::analyzePattern(const MatrixType&) must have been called beforehand with * a matrix having the same sparsity pattern than \a mat. * * \param mat The sparse column-major matrix */ template void SparseQR::factorize(const MatrixType& mat) { using std::abs; using std::max; eigen_assert(m_analysisIsok && "analyzePattern() should be called before this step"); Index m = mat.rows(); Index n = mat.cols(); Index diagSize = (std::min)(m,n); IndexVector mark((std::max)(m,n)); mark.setConstant(-1); // Record the visited nodes IndexVector Ridx(n), Qidx(m); // Store temporarily the row indexes for the current column of R and Q Index nzcolR, nzcolQ; // Number of nonzero for the current column of R and Q ScalarVector tval(m); // The dense vector used to compute the current column RealScalar pivotThreshold = m_threshold; m_R.setZero(); m_Q.setZero(); m_pmat = mat; if(!m_isEtreeOk) { m_outputPerm_c = m_perm_c.inverse(); internal::coletree(m_pmat, m_etree, m_firstRowElt, m_outputPerm_c.indices().data()); m_isEtreeOk = true; } m_pmat.uncompress(); // To have the innerNonZeroPtr allocated // Apply the fill-in reducing permutation lazily: { // If the input is row major, copy the original column indices, // otherwise directly use the input matrix // IndexVector originalOuterIndicesCpy; const Index *originalOuterIndices = mat.outerIndexPtr(); if(MatrixType::IsRowMajor) { originalOuterIndicesCpy = IndexVector::Map(m_pmat.outerIndexPtr(),n+1); originalOuterIndices = originalOuterIndicesCpy.data(); } for (int i = 0; i < n; i++) { Index p = m_perm_c.size() ? m_perm_c.indices()(i) : i; m_pmat.outerIndexPtr()[p] = originalOuterIndices[i]; m_pmat.innerNonZeroPtr()[p] = originalOuterIndices[i+1] - originalOuterIndices[i]; } } /* Compute the default threshold as in MatLab, see: * Tim Davis, "Algorithm 915, SuiteSparseQR: Multifrontal Multithreaded Rank-Revealing * Sparse QR Factorization, ACM Trans. on Math. Soft. 
38(1), 2011, Page 8:3 */ if(m_useDefaultThreshold) { RealScalar max2Norm = 0.0; for (int j = 0; j < n; j++) max2Norm = (max)(max2Norm, m_pmat.col(j).norm()); if(max2Norm==RealScalar(0)) max2Norm = RealScalar(1); pivotThreshold = 20 * (m + n) * max2Norm * NumTraits::epsilon(); } // Initialize the numerical permutation m_pivotperm.setIdentity(n); Index nonzeroCol = 0; // Record the number of valid pivots m_Q.startVec(0); // Left looking rank-revealing QR factorization: compute a column of R and Q at a time for (Index col = 0; col < n; ++col) { mark.setConstant(-1); m_R.startVec(col); mark(nonzeroCol) = col; Qidx(0) = nonzeroCol; nzcolR = 0; nzcolQ = 1; bool found_diag = nonzeroCol>=m; tval.setZero(); // Symbolic factorization: find the nonzero locations of the column k of the factors R and Q, i.e., // all the nodes (with indexes lower than rank) reachable through the column elimination tree (etree) rooted at node k. // Note: if the diagonal entry does not exist, then its contribution must be explicitly added, // thus the trick with found_diag that permits to do one more iteration on the diagonal element if this one has not been found. for (typename QRMatrixType::InnerIterator itp(m_pmat, col); itp || !found_diag; ++itp) { Index curIdx = nonzeroCol; if(itp) curIdx = itp.row(); if(curIdx == nonzeroCol) found_diag = true; // Get the nonzeros indexes of the current column of R Index st = m_firstRowElt(curIdx); // The traversal of the etree starts here if (st < 0 ) { m_lastError = "Empty row found during numerical factorization"; m_info = InvalidInput; return; } // Traverse the etree Index bi = nzcolR; for (; mark(st) != col; st = m_etree(st)) { Ridx(nzcolR) = st; // Add this row to the list, mark(st) = col; // and mark this row as visited nzcolR++; } // Reverse the list to get the topological ordering Index nt = nzcolR-bi; for(Index i = 0; i < nt/2; i++) std::swap(Ridx(bi+i), Ridx(nzcolR-i-1)); // Copy the current (curIdx,pcol) value of the input matrix if(itp) tval(curIdx) = itp.value(); else tval(curIdx) = Scalar(0); // Compute the pattern of Q(:,k) if(curIdx > nonzeroCol && mark(curIdx) != col ) { Qidx(nzcolQ) = curIdx; // Add this row to the pattern of Q, mark(curIdx) = col; // and mark it as visited nzcolQ++; } } // Browse all the indexes of R(:,col) in reverse order for (Index i = nzcolR-1; i >= 0; i--) { Index curIdx = Ridx(i); // Apply the curIdx-th householder vector to the current column (temporarily stored into tval) Scalar tdot(0); // First compute q' * tval tdot = m_Q.col(curIdx).dot(tval); tdot *= m_hcoeffs(curIdx); // Then update tval = tval - q * tau // FIXME: tval -= tdot * m_Q.col(curIdx) should amount to the same (need to check/add support for efficient "dense ?= sparse") for (typename QRMatrixType::InnerIterator itq(m_Q, curIdx); itq; ++itq) tval(itq.row()) -= itq.value() * tdot; // Detect fill-in for the current column of Q if(m_etree(Ridx(i)) == nonzeroCol) { for (typename QRMatrixType::InnerIterator itq(m_Q, curIdx); itq; ++itq) { Index iQ = itq.row(); if (mark(iQ) != col) { Qidx(nzcolQ++) = iQ; // Add this row to the pattern of Q, mark(iQ) = col; // and mark it as visited } } } } // End update current column Scalar tau = 0; RealScalar beta = 0; if(nonzeroCol < diagSize) { // Compute the Householder reflection that eliminate the current column // FIXME this step should call the Householder module. Scalar c0 = nzcolQ ? 
tval(Qidx(0)) : Scalar(0); // First, the squared norm of Q((col+1):m, col) RealScalar sqrNorm = 0.; for (Index itq = 1; itq < nzcolQ; ++itq) sqrNorm += numext::abs2(tval(Qidx(itq))); if(sqrNorm == RealScalar(0) && numext::imag(c0) == RealScalar(0)) { beta = numext::real(c0); tval(Qidx(0)) = 1; } else { using std::sqrt; beta = sqrt(numext::abs2(c0) + sqrNorm); if(numext::real(c0) >= RealScalar(0)) beta = -beta; tval(Qidx(0)) = 1; for (Index itq = 1; itq < nzcolQ; ++itq) tval(Qidx(itq)) /= (c0 - beta); tau = numext::conj((beta-c0) / beta); } } // Insert values in R for (Index i = nzcolR-1; i >= 0; i--) { Index curIdx = Ridx(i); if(curIdx < nonzeroCol) { m_R.insertBackByOuterInnerUnordered(col, curIdx) = tval(curIdx); tval(curIdx) = Scalar(0.); } } if(nonzeroCol < diagSize && abs(beta) >= pivotThreshold) { m_R.insertBackByOuterInner(col, nonzeroCol) = beta; // The householder coefficient m_hcoeffs(nonzeroCol) = tau; // Record the householder reflections for (Index itq = 0; itq < nzcolQ; ++itq) { Index iQ = Qidx(itq); m_Q.insertBackByOuterInnerUnordered(nonzeroCol,iQ) = tval(iQ); tval(iQ) = Scalar(0.); } nonzeroCol++; if(nonzeroCol struct solve_retval, Rhs> : solve_retval_base, Rhs> { typedef SparseQR<_MatrixType,OrderingType> Dec; EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) template void evalTo(Dest& dst) const { dec()._solve(rhs(),dst); } }; template struct sparse_solve_retval, Rhs> : sparse_solve_retval_base, Rhs> { typedef SparseQR<_MatrixType, OrderingType> Dec; EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec, Rhs) template void evalTo(Dest& dst) const { this->defaultEvalTo(dst); } }; } // end namespace internal template struct SparseQR_QProduct : ReturnByValue > { typedef typename SparseQRType::QRMatrixType MatrixType; typedef typename SparseQRType::Scalar Scalar; typedef typename SparseQRType::Index Index; // Get the references SparseQR_QProduct(const SparseQRType& qr, const Derived& other, bool transpose) : m_qr(qr),m_other(other),m_transpose(transpose) {} inline Index rows() const { return m_transpose ? 
m_qr.rows() : m_qr.cols(); } inline Index cols() const { return m_other.cols(); } // Assign to a vector template void evalTo(DesType& res) const { Index m = m_qr.rows(); Index n = m_qr.cols(); Index diagSize = (std::min)(m,n); res = m_other; if (m_transpose) { eigen_assert(m_qr.m_Q.rows() == m_other.rows() && "Non conforming object sizes"); //Compute res = Q' * other column by column for(Index j = 0; j < res.cols(); j++){ for (Index k = 0; k < diagSize; k++) { Scalar tau = Scalar(0); tau = m_qr.m_Q.col(k).dot(res.col(j)); if(tau==Scalar(0)) continue; tau = tau * m_qr.m_hcoeffs(k); res.col(j) -= tau * m_qr.m_Q.col(k); } } } else { eigen_assert(m_qr.m_Q.rows() == m_other.rows() && "Non conforming object sizes"); // Compute res = Q * other column by column for(Index j = 0; j < res.cols(); j++) { for (Index k = diagSize-1; k >=0; k--) { Scalar tau = Scalar(0); tau = m_qr.m_Q.col(k).dot(res.col(j)); if(tau==Scalar(0)) continue; tau = tau * m_qr.m_hcoeffs(k); res.col(j) -= tau * m_qr.m_Q.col(k); } } } } const SparseQRType& m_qr; const Derived& m_other; bool m_transpose; }; template struct SparseQRMatrixQReturnType : public EigenBase > { typedef typename SparseQRType::Index Index; typedef typename SparseQRType::Scalar Scalar; typedef Matrix DenseMatrix; SparseQRMatrixQReturnType(const SparseQRType& qr) : m_qr(qr) {} template SparseQR_QProduct operator*(const MatrixBase& other) { return SparseQR_QProduct(m_qr,other.derived(),false); } SparseQRMatrixQTransposeReturnType adjoint() const { return SparseQRMatrixQTransposeReturnType(m_qr); } inline Index rows() const { return m_qr.rows(); } inline Index cols() const { return (std::min)(m_qr.rows(),m_qr.cols()); } // To use for operations with the transpose of Q SparseQRMatrixQTransposeReturnType transpose() const { return SparseQRMatrixQTransposeReturnType(m_qr); } template void evalTo(MatrixBase& dest) const { dest.derived() = m_qr.matrixQ() * Dest::Identity(m_qr.rows(), m_qr.rows()); } template void evalTo(SparseMatrixBase& dest) const { Dest idMat(m_qr.rows(), m_qr.rows()); idMat.setIdentity(); // Sort the sparse householder reflectors if needed const_cast(&m_qr)->sort_matrix_Q(); dest.derived() = SparseQR_QProduct(m_qr, idMat, false); } const SparseQRType& m_qr; }; template struct SparseQRMatrixQTransposeReturnType { SparseQRMatrixQTransposeReturnType(const SparseQRType& qr) : m_qr(qr) {} template SparseQR_QProduct operator*(const MatrixBase& other) { return SparseQR_QProduct(m_qr,other.derived(), true); } const SparseQRType& m_qr; }; } // end namespace Eigen #endif RcppEigen/inst/include/Eigen/src/Eigenvalues/0000755000176200001440000000000012647570720020625 5ustar liggesusersRcppEigen/inst/include/Eigen/src/Eigenvalues/RealSchur.h0000644000176200001440000004633712550462273022677 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // Copyright (C) 2010,2012 Jitse Niesen // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
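// ---------------------------------------------------------------------------
// [Editor's sketch -- not part of the original sources.]  Typical use of the
// SparseQR class documented above: a rank-revealing least-squares solve of a
// compressed, column-major sparse matrix.  A minimal sketch assuming the
// Eigen 3.2-era API bundled with this package:
#include <Eigen/SparseCore>
#include <Eigen/SparseQR>
#include <Eigen/OrderingMethods>
#include <iostream>

int main() {
    typedef Eigen::SparseMatrix<double> SpMat;
    SpMat A(4, 3);                       // small over-determined system
    A.insert(0, 0) = 2.0;  A.insert(1, 1) = 1.0;
    A.insert(2, 2) = 3.0;  A.insert(3, 0) = 1.0;
    A.makeCompressed();                  // SparseQR requires compressed mode

    Eigen::VectorXd b(4);
    b << 1.0, 2.0, 3.0, 4.0;

    // Factor A*P = Q*R with a fill-reducing COLAMD column ordering.
    Eigen::SparseQR<SpMat, Eigen::COLAMDOrdering<int> > qr(A);
    if (qr.info() != Eigen::Success) return 1;

    Eigen::VectorXd x = qr.solve(b);     // least-squares solution
    std::cout << "rank(A) = " << qr.rank() << "\nx =\n" << x << std::endl;
    return 0;
}
// ---------------------------------------------------------------------------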
#ifndef EIGEN_REAL_SCHUR_H #define EIGEN_REAL_SCHUR_H #include "./HessenbergDecomposition.h" namespace Eigen { /** \eigenvalues_module \ingroup Eigenvalues_Module * * * \class RealSchur * * \brief Performs a real Schur decomposition of a square matrix * * \tparam _MatrixType the type of the matrix of which we are computing the * real Schur decomposition; this is expected to be an instantiation of the * Matrix class template. * * Given a real square matrix A, this class computes the real Schur * decomposition: \f$ A = U T U^T \f$ where U is a real orthogonal matrix and * T is a real quasi-triangular matrix. An orthogonal matrix is a matrix whose * inverse is equal to its transpose, \f$ U^{-1} = U^T \f$. A quasi-triangular * matrix is a block-triangular matrix whose diagonal consists of 1-by-1 * blocks and 2-by-2 blocks with complex eigenvalues. The eigenvalues of the * blocks on the diagonal of T are the same as the eigenvalues of the matrix * A, and thus the real Schur decomposition is used in EigenSolver to compute * the eigendecomposition of a matrix. * * Call the function compute() to compute the real Schur decomposition of a * given matrix. Alternatively, you can use the RealSchur(const MatrixType&, bool) * constructor which computes the real Schur decomposition at construction * time. Once the decomposition is computed, you can use the matrixU() and * matrixT() functions to retrieve the matrices U and T in the decomposition. * * The documentation of RealSchur(const MatrixType&, bool) contains an example * of the typical use of this class. * * \note The implementation is adapted from * JAMA (public domain). * Their code is based on EISPACK. * * \sa class ComplexSchur, class EigenSolver, class ComplexEigenSolver */ template class RealSchur { public: typedef _MatrixType MatrixType; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, Options = MatrixType::Options, MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime }; typedef typename MatrixType::Scalar Scalar; typedef std::complex::Real> ComplexScalar; typedef typename MatrixType::Index Index; typedef Matrix EigenvalueType; typedef Matrix ColumnVectorType; /** \brief Default constructor. * * \param [in] size Positive integer, size of the matrix whose Schur decomposition will be computed. * * The default constructor is useful in cases in which the user intends to * perform decompositions via compute(). The \p size parameter is only * used as a hint. It is not an error to give a wrong \p size, but it may * impair performance. * * \sa compute() for an example. */ RealSchur(Index size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime) : m_matT(size, size), m_matU(size, size), m_workspaceVector(size), m_hess(size), m_isInitialized(false), m_matUisUptodate(false), m_maxIters(-1) { } /** \brief Constructor; computes real Schur decomposition of given matrix. * * \param[in] matrix Square matrix whose Schur decomposition is to be computed. * \param[in] computeU If true, both T and U are computed; if false, only T is computed. * * This constructor calls compute() to compute the Schur decomposition. 
* * Example: \include RealSchur_RealSchur_MatrixType.cpp * Output: \verbinclude RealSchur_RealSchur_MatrixType.out */ RealSchur(const MatrixType& matrix, bool computeU = true) : m_matT(matrix.rows(),matrix.cols()), m_matU(matrix.rows(),matrix.cols()), m_workspaceVector(matrix.rows()), m_hess(matrix.rows()), m_isInitialized(false), m_matUisUptodate(false), m_maxIters(-1) { compute(matrix, computeU); } /** \brief Returns the orthogonal matrix in the Schur decomposition. * * \returns A const reference to the matrix U. * * \pre Either the constructor RealSchur(const MatrixType&, bool) or the * member function compute(const MatrixType&, bool) has been called before * to compute the Schur decomposition of a matrix, and \p computeU was set * to true (the default value). * * \sa RealSchur(const MatrixType&, bool) for an example */ const MatrixType& matrixU() const { eigen_assert(m_isInitialized && "RealSchur is not initialized."); eigen_assert(m_matUisUptodate && "The matrix U has not been computed during the RealSchur decomposition."); return m_matU; } /** \brief Returns the quasi-triangular matrix in the Schur decomposition. * * \returns A const reference to the matrix T. * * \pre Either the constructor RealSchur(const MatrixType&, bool) or the * member function compute(const MatrixType&, bool) has been called before * to compute the Schur decomposition of a matrix. * * \sa RealSchur(const MatrixType&, bool) for an example */ const MatrixType& matrixT() const { eigen_assert(m_isInitialized && "RealSchur is not initialized."); return m_matT; } /** \brief Computes Schur decomposition of given matrix. * * \param[in] matrix Square matrix whose Schur decomposition is to be computed. * \param[in] computeU If true, both T and U are computed; if false, only T is computed. * \returns Reference to \c *this * * The Schur decomposition is computed by first reducing the matrix to * Hessenberg form using the class HessenbergDecomposition. The Hessenberg * matrix is then reduced to triangular form by performing Francis QR * iterations with implicit double shift. The cost of computing the Schur * decomposition depends on the number of iterations; as a rough guide, it * may be taken to be \f$25n^3\f$ flops if \a computeU is true and * \f$10n^3\f$ flops if \a computeU is false. * * Example: \include RealSchur_compute.cpp * Output: \verbinclude RealSchur_compute.out * * \sa compute(const MatrixType&, bool, Index) */ RealSchur& compute(const MatrixType& matrix, bool computeU = true); /** \brief Computes Schur decomposition of a Hessenberg matrix H = Z T Z^T * \param[in] matrixH Matrix in Hessenberg form H * \param[in] matrixQ orthogonal matrix Q that transform a matrix A to H : A = Q H Q^T * \param computeU Computes the matriX U of the Schur vectors * \return Reference to \c *this * * This routine assumes that the matrix is already reduced in Hessenberg form matrixH * using either the class HessenbergDecomposition or another mean. 
* It computes the upper quasi-triangular matrix T of the Schur decomposition of H * When computeU is true, this routine computes the matrix U such that * A = U T U^T = (QZ) T (QZ)^T = Q H Q^T where A is the initial matrix * * NOTE Q is referenced if computeU is true; so, if the initial orthogonal matrix * is not available, the user should give an identity matrix (Q.setIdentity()) * * \sa compute(const MatrixType&, bool) */ template RealSchur& computeFromHessenberg(const HessMatrixType& matrixH, const OrthMatrixType& matrixQ, bool computeU); /** \brief Reports whether previous computation was successful. * * \returns \c Success if computation was succesful, \c NoConvergence otherwise. */ ComputationInfo info() const { eigen_assert(m_isInitialized && "RealSchur is not initialized."); return m_info; } /** \brief Sets the maximum number of iterations allowed. * * If not specified by the user, the maximum number of iterations is m_maxIterationsPerRow times the size * of the matrix. */ RealSchur& setMaxIterations(Index maxIters) { m_maxIters = maxIters; return *this; } /** \brief Returns the maximum number of iterations. */ Index getMaxIterations() { return m_maxIters; } /** \brief Maximum number of iterations per row. * * If not otherwise specified, the maximum number of iterations is this number times the size of the * matrix. It is currently set to 40. */ static const int m_maxIterationsPerRow = 40; private: MatrixType m_matT; MatrixType m_matU; ColumnVectorType m_workspaceVector; HessenbergDecomposition m_hess; ComputationInfo m_info; bool m_isInitialized; bool m_matUisUptodate; Index m_maxIters; typedef Matrix Vector3s; Scalar computeNormOfT(); Index findSmallSubdiagEntry(Index iu); void splitOffTwoRows(Index iu, bool computeU, const Scalar& exshift); void computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo); void initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector); void performFrancisQRStep(Index il, Index im, Index iu, bool computeU, const Vector3s& firstHouseholderVector, Scalar* workspace); }; template RealSchur& RealSchur::compute(const MatrixType& matrix, bool computeU) { eigen_assert(matrix.cols() == matrix.rows()); Index maxIters = m_maxIters; if (maxIters == -1) maxIters = m_maxIterationsPerRow * matrix.rows(); // Step 1. Reduce to Hessenberg form m_hess.compute(matrix); // Step 2. Reduce to real Schur form computeFromHessenberg(m_hess.matrixH(), m_hess.matrixQ(), computeU); return *this; } template template RealSchur& RealSchur::computeFromHessenberg(const HessMatrixType& matrixH, const OrthMatrixType& matrixQ, bool computeU) { m_matT = matrixH; if(computeU) m_matU = matrixQ; Index maxIters = m_maxIters; if (maxIters == -1) maxIters = m_maxIterationsPerRow * matrixH.rows(); m_workspaceVector.resize(m_matT.cols()); Scalar* workspace = &m_workspaceVector.coeffRef(0); // The matrix m_matT is divided in three parts. // Rows 0,...,il-1 are decoupled from the rest because m_matT(il,il-1) is zero. // Rows il,...,iu is the part we are working on (the active window). // Rows iu+1,...,end are already brought in triangular form. 
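// [Editor's note, added for clarity -- not in the original source.]
// The loop below is the deflation-driven QR iteration: the active window is
// rows/columns [il, iu].  When the sub-diagonal entry bounding the window
// becomes negligible, one real eigenvalue (il == iu) or a 2x2 block
// (il == iu-1) is split off and iu shrinks; otherwise a Francis double-shift
// QR sweep is applied to the window.  The iteration reports NoConvergence
// once the total number of sweeps exceeds maxIters.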
Index iu = m_matT.cols() - 1; Index iter = 0; // iteration count for current eigenvalue Index totalIter = 0; // iteration count for whole matrix Scalar exshift(0); // sum of exceptional shifts Scalar norm = computeNormOfT(); if(norm!=0) { while (iu >= 0) { Index il = findSmallSubdiagEntry(iu); // Check for convergence if (il == iu) // One root found { m_matT.coeffRef(iu,iu) = m_matT.coeff(iu,iu) + exshift; if (iu > 0) m_matT.coeffRef(iu, iu-1) = Scalar(0); iu--; iter = 0; } else if (il == iu-1) // Two roots found { splitOffTwoRows(iu, computeU, exshift); iu -= 2; iter = 0; } else // No convergence yet { // The firstHouseholderVector vector has to be initialized to something to get rid of a silly GCC warning (-O1 -Wall -DNDEBUG ) Vector3s firstHouseholderVector(0,0,0), shiftInfo; computeShift(iu, iter, exshift, shiftInfo); iter = iter + 1; totalIter = totalIter + 1; if (totalIter > maxIters) break; Index im; initFrancisQRStep(il, iu, shiftInfo, im, firstHouseholderVector); performFrancisQRStep(il, im, iu, computeU, firstHouseholderVector, workspace); } } } if(totalIter <= maxIters) m_info = Success; else m_info = NoConvergence; m_isInitialized = true; m_matUisUptodate = computeU; return *this; } /** \internal Computes and returns vector L1 norm of T */ template inline typename MatrixType::Scalar RealSchur::computeNormOfT() { const Index size = m_matT.cols(); // FIXME to be efficient the following would requires a triangular reduxion code // Scalar norm = m_matT.upper().cwiseAbs().sum() // + m_matT.bottomLeftCorner(size-1,size-1).diagonal().cwiseAbs().sum(); Scalar norm(0); for (Index j = 0; j < size; ++j) norm += m_matT.col(j).segment(0, (std::min)(size,j+2)).cwiseAbs().sum(); return norm; } /** \internal Look for single small sub-diagonal element and returns its index */ template inline typename MatrixType::Index RealSchur::findSmallSubdiagEntry(Index iu) { using std::abs; Index res = iu; while (res > 0) { Scalar s = abs(m_matT.coeff(res-1,res-1)) + abs(m_matT.coeff(res,res)); if (abs(m_matT.coeff(res,res-1)) <= NumTraits::epsilon() * s) break; res--; } return res; } /** \internal Update T given that rows iu-1 and iu decouple from the rest. */ template inline void RealSchur::splitOffTwoRows(Index iu, bool computeU, const Scalar& exshift) { using std::sqrt; using std::abs; const Index size = m_matT.cols(); // The eigenvalues of the 2x2 matrix [a b; c d] are // trace +/- sqrt(discr/4) where discr = tr^2 - 4*det, tr = a + d, det = ad - bc Scalar p = Scalar(0.5) * (m_matT.coeff(iu-1,iu-1) - m_matT.coeff(iu,iu)); Scalar q = p * p + m_matT.coeff(iu,iu-1) * m_matT.coeff(iu-1,iu); // q = tr^2 / 4 - det = discr/4 m_matT.coeffRef(iu,iu) += exshift; m_matT.coeffRef(iu-1,iu-1) += exshift; if (q >= Scalar(0)) // Two real eigenvalues { Scalar z = sqrt(abs(q)); JacobiRotation rot; if (p >= Scalar(0)) rot.makeGivens(p + z, m_matT.coeff(iu, iu-1)); else rot.makeGivens(p - z, m_matT.coeff(iu, iu-1)); m_matT.rightCols(size-iu+1).applyOnTheLeft(iu-1, iu, rot.adjoint()); m_matT.topRows(iu+1).applyOnTheRight(iu-1, iu, rot); m_matT.coeffRef(iu, iu-1) = Scalar(0); if (computeU) m_matU.applyOnTheRight(iu-1, iu, rot); } if (iu > 1) m_matT.coeffRef(iu-1, iu-2) = Scalar(0); } /** \internal Form shift in shiftInfo, and update exshift if an exceptional shift is performed. 
*/ template inline void RealSchur::computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo) { using std::sqrt; using std::abs; shiftInfo.coeffRef(0) = m_matT.coeff(iu,iu); shiftInfo.coeffRef(1) = m_matT.coeff(iu-1,iu-1); shiftInfo.coeffRef(2) = m_matT.coeff(iu,iu-1) * m_matT.coeff(iu-1,iu); // Wilkinson's original ad hoc shift if (iter == 10) { exshift += shiftInfo.coeff(0); for (Index i = 0; i <= iu; ++i) m_matT.coeffRef(i,i) -= shiftInfo.coeff(0); Scalar s = abs(m_matT.coeff(iu,iu-1)) + abs(m_matT.coeff(iu-1,iu-2)); shiftInfo.coeffRef(0) = Scalar(0.75) * s; shiftInfo.coeffRef(1) = Scalar(0.75) * s; shiftInfo.coeffRef(2) = Scalar(-0.4375) * s * s; } // MATLAB's new ad hoc shift if (iter == 30) { Scalar s = (shiftInfo.coeff(1) - shiftInfo.coeff(0)) / Scalar(2.0); s = s * s + shiftInfo.coeff(2); if (s > Scalar(0)) { s = sqrt(s); if (shiftInfo.coeff(1) < shiftInfo.coeff(0)) s = -s; s = s + (shiftInfo.coeff(1) - shiftInfo.coeff(0)) / Scalar(2.0); s = shiftInfo.coeff(0) - shiftInfo.coeff(2) / s; exshift += s; for (Index i = 0; i <= iu; ++i) m_matT.coeffRef(i,i) -= s; shiftInfo.setConstant(Scalar(0.964)); } } } /** \internal Compute index im at which Francis QR step starts and the first Householder vector. */ template inline void RealSchur::initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector) { using std::abs; Vector3s& v = firstHouseholderVector; // alias to save typing for (im = iu-2; im >= il; --im) { const Scalar Tmm = m_matT.coeff(im,im); const Scalar r = shiftInfo.coeff(0) - Tmm; const Scalar s = shiftInfo.coeff(1) - Tmm; v.coeffRef(0) = (r * s - shiftInfo.coeff(2)) / m_matT.coeff(im+1,im) + m_matT.coeff(im,im+1); v.coeffRef(1) = m_matT.coeff(im+1,im+1) - Tmm - r - s; v.coeffRef(2) = m_matT.coeff(im+2,im+1); if (im == il) { break; } const Scalar lhs = m_matT.coeff(im,im-1) * (abs(v.coeff(1)) + abs(v.coeff(2))); const Scalar rhs = v.coeff(0) * (abs(m_matT.coeff(im-1,im-1)) + abs(Tmm) + abs(m_matT.coeff(im+1,im+1))); if (abs(lhs) < NumTraits::epsilon() * rhs) break; } } /** \internal Perform a Francis QR step involving rows il:iu and columns im:iu. 
*/ template inline void RealSchur::performFrancisQRStep(Index il, Index im, Index iu, bool computeU, const Vector3s& firstHouseholderVector, Scalar* workspace) { eigen_assert(im >= il); eigen_assert(im <= iu-2); const Index size = m_matT.cols(); for (Index k = im; k <= iu-2; ++k) { bool firstIteration = (k == im); Vector3s v; if (firstIteration) v = firstHouseholderVector; else v = m_matT.template block<3,1>(k,k-1); Scalar tau, beta; Matrix ess; v.makeHouseholder(ess, tau, beta); if (beta != Scalar(0)) // if v is not zero { if (firstIteration && k > il) m_matT.coeffRef(k,k-1) = -m_matT.coeff(k,k-1); else if (!firstIteration) m_matT.coeffRef(k,k-1) = beta; // These Householder transformations form the O(n^3) part of the algorithm m_matT.block(k, k, 3, size-k).applyHouseholderOnTheLeft(ess, tau, workspace); m_matT.block(0, k, (std::min)(iu,k+3) + 1, 3).applyHouseholderOnTheRight(ess, tau, workspace); if (computeU) m_matU.block(0, k, size, 3).applyHouseholderOnTheRight(ess, tau, workspace); } } Matrix v = m_matT.template block<2,1>(iu-1, iu-2); Scalar tau, beta; Matrix ess; v.makeHouseholder(ess, tau, beta); if (beta != Scalar(0)) // if v is not zero { m_matT.coeffRef(iu-1, iu-2) = beta; m_matT.block(iu-1, iu-1, 2, size-iu+1).applyHouseholderOnTheLeft(ess, tau, workspace); m_matT.block(0, iu-1, iu+1, 2).applyHouseholderOnTheRight(ess, tau, workspace); if (computeU) m_matU.block(0, iu-1, size, 2).applyHouseholderOnTheRight(ess, tau, workspace); } // clean up pollution due to round-off errors for (Index i = im+2; i <= iu; ++i) { m_matT.coeffRef(i,i-2) = Scalar(0); if (i > im+2) m_matT.coeffRef(i,i-3) = Scalar(0); } } } // end namespace Eigen #endif // EIGEN_REAL_SCHUR_H RcppEigen/inst/include/Eigen/src/Eigenvalues/ComplexEigenSolver.h0000644000176200001440000003001212550462273024540 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Claire Maurice // Copyright (C) 2009 Gael Guennebaud // Copyright (C) 2010,2012 Jitse Niesen // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_COMPLEX_EIGEN_SOLVER_H #define EIGEN_COMPLEX_EIGEN_SOLVER_H #include "./ComplexSchur.h" namespace Eigen { /** \eigenvalues_module \ingroup Eigenvalues_Module * * * \class ComplexEigenSolver * * \brief Computes eigenvalues and eigenvectors of general complex matrices * * \tparam _MatrixType the type of the matrix of which we are * computing the eigendecomposition; this is expected to be an * instantiation of the Matrix class template. * * The eigenvalues and eigenvectors of a matrix \f$ A \f$ are scalars * \f$ \lambda \f$ and vectors \f$ v \f$ such that \f$ Av = \lambda v * \f$. If \f$ D \f$ is a diagonal matrix with the eigenvalues on * the diagonal, and \f$ V \f$ is a matrix with the eigenvectors as * its columns, then \f$ A V = V D \f$. The matrix \f$ V \f$ is * almost always invertible, in which case we have \f$ A = V D V^{-1} * \f$. This is called the eigendecomposition. * * The main function in this class is compute(), which computes the * eigenvalues and eigenvectors of a given function. The * documentation for that function contains an example showing the * main features of the class. * * \sa class EigenSolver, class SelfAdjointEigenSolver */ template class ComplexEigenSolver { public: /** \brief Synonym for the template parameter \p _MatrixType. 
*/ typedef _MatrixType MatrixType; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, Options = MatrixType::Options, MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime }; /** \brief Scalar type for matrices of type #MatrixType. */ typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef typename MatrixType::Index Index; /** \brief Complex scalar type for #MatrixType. * * This is \c std::complex if #Scalar is real (e.g., * \c float or \c double) and just \c Scalar if #Scalar is * complex. */ typedef std::complex ComplexScalar; /** \brief Type for vector of eigenvalues as returned by eigenvalues(). * * This is a column vector with entries of type #ComplexScalar. * The length of the vector is the size of #MatrixType. */ typedef Matrix EigenvalueType; /** \brief Type for matrix of eigenvectors as returned by eigenvectors(). * * This is a square matrix with entries of type #ComplexScalar. * The size is the same as the size of #MatrixType. */ typedef Matrix EigenvectorType; /** \brief Default constructor. * * The default constructor is useful in cases in which the user intends to * perform decompositions via compute(). */ ComplexEigenSolver() : m_eivec(), m_eivalues(), m_schur(), m_isInitialized(false), m_eigenvectorsOk(false), m_matX() {} /** \brief Default Constructor with memory preallocation * * Like the default constructor but with preallocation of the internal data * according to the specified problem \a size. * \sa ComplexEigenSolver() */ ComplexEigenSolver(Index size) : m_eivec(size, size), m_eivalues(size), m_schur(size), m_isInitialized(false), m_eigenvectorsOk(false), m_matX(size, size) {} /** \brief Constructor; computes eigendecomposition of given matrix. * * \param[in] matrix Square matrix whose eigendecomposition is to be computed. * \param[in] computeEigenvectors If true, both the eigenvectors and the * eigenvalues are computed; if false, only the eigenvalues are * computed. * * This constructor calls compute() to compute the eigendecomposition. */ ComplexEigenSolver(const MatrixType& matrix, bool computeEigenvectors = true) : m_eivec(matrix.rows(),matrix.cols()), m_eivalues(matrix.cols()), m_schur(matrix.rows()), m_isInitialized(false), m_eigenvectorsOk(false), m_matX(matrix.rows(),matrix.cols()) { compute(matrix, computeEigenvectors); } /** \brief Returns the eigenvectors of given matrix. * * \returns A const reference to the matrix whose columns are the eigenvectors. * * \pre Either the constructor * ComplexEigenSolver(const MatrixType& matrix, bool) or the member * function compute(const MatrixType& matrix, bool) has been called before * to compute the eigendecomposition of a matrix, and * \p computeEigenvectors was set to true (the default). * * This function returns a matrix whose columns are the eigenvectors. Column * \f$ k \f$ is an eigenvector corresponding to eigenvalue number \f$ k * \f$ as returned by eigenvalues(). The eigenvectors are normalized to * have (Euclidean) norm equal to one. The matrix returned by this * function is the matrix \f$ V \f$ in the eigendecomposition \f$ A = V D * V^{-1} \f$, if it exists. 
* * Example: \include ComplexEigenSolver_eigenvectors.cpp * Output: \verbinclude ComplexEigenSolver_eigenvectors.out */ const EigenvectorType& eigenvectors() const { eigen_assert(m_isInitialized && "ComplexEigenSolver is not initialized."); eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues."); return m_eivec; } /** \brief Returns the eigenvalues of given matrix. * * \returns A const reference to the column vector containing the eigenvalues. * * \pre Either the constructor * ComplexEigenSolver(const MatrixType& matrix, bool) or the member * function compute(const MatrixType& matrix, bool) has been called before * to compute the eigendecomposition of a matrix. * * This function returns a column vector containing the * eigenvalues. Eigenvalues are repeated according to their * algebraic multiplicity, so there are as many eigenvalues as * rows in the matrix. The eigenvalues are not sorted in any particular * order. * * Example: \include ComplexEigenSolver_eigenvalues.cpp * Output: \verbinclude ComplexEigenSolver_eigenvalues.out */ const EigenvalueType& eigenvalues() const { eigen_assert(m_isInitialized && "ComplexEigenSolver is not initialized."); return m_eivalues; } /** \brief Computes eigendecomposition of given matrix. * * \param[in] matrix Square matrix whose eigendecomposition is to be computed. * \param[in] computeEigenvectors If true, both the eigenvectors and the * eigenvalues are computed; if false, only the eigenvalues are * computed. * \returns Reference to \c *this * * This function computes the eigenvalues of the complex matrix \p matrix. * The eigenvalues() function can be used to retrieve them. If * \p computeEigenvectors is true, then the eigenvectors are also computed * and can be retrieved by calling eigenvectors(). * * The matrix is first reduced to Schur form using the * ComplexSchur class. The Schur decomposition is then used to * compute the eigenvalues and eigenvectors. * * The cost of the computation is dominated by the cost of the * Schur decomposition, which is \f$ O(n^3) \f$ where \f$ n \f$ * is the size of the matrix. * * Example: \include ComplexEigenSolver_compute.cpp * Output: \verbinclude ComplexEigenSolver_compute.out */ ComplexEigenSolver& compute(const MatrixType& matrix, bool computeEigenvectors = true); /** \brief Reports whether previous computation was successful. * * \returns \c Success if computation was succesful, \c NoConvergence otherwise. */ ComputationInfo info() const { eigen_assert(m_isInitialized && "ComplexEigenSolver is not initialized."); return m_schur.info(); } /** \brief Sets the maximum number of iterations allowed. */ ComplexEigenSolver& setMaxIterations(Index maxIters) { m_schur.setMaxIterations(maxIters); return *this; } /** \brief Returns the maximum number of iterations. 
*/ Index getMaxIterations() { return m_schur.getMaxIterations(); } protected: static void check_template_parameters() { EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); } EigenvectorType m_eivec; EigenvalueType m_eivalues; ComplexSchur m_schur; bool m_isInitialized; bool m_eigenvectorsOk; EigenvectorType m_matX; private: void doComputeEigenvectors(const RealScalar& matrixnorm); void sortEigenvalues(bool computeEigenvectors); }; template ComplexEigenSolver& ComplexEigenSolver::compute(const MatrixType& matrix, bool computeEigenvectors) { check_template_parameters(); // this code is inspired from Jampack eigen_assert(matrix.cols() == matrix.rows()); // Do a complex Schur decomposition, A = U T U^* // The eigenvalues are on the diagonal of T. m_schur.compute(matrix, computeEigenvectors); if(m_schur.info() == Success) { m_eivalues = m_schur.matrixT().diagonal(); if(computeEigenvectors) doComputeEigenvectors(matrix.norm()); sortEigenvalues(computeEigenvectors); } m_isInitialized = true; m_eigenvectorsOk = computeEigenvectors; return *this; } template void ComplexEigenSolver::doComputeEigenvectors(const RealScalar& matrixnorm) { const Index n = m_eivalues.size(); // Compute X such that T = X D X^(-1), where D is the diagonal of T. // The matrix X is unit triangular. m_matX = EigenvectorType::Zero(n, n); for(Index k=n-1 ; k>=0 ; k--) { m_matX.coeffRef(k,k) = ComplexScalar(1.0,0.0); // Compute X(i,k) using the (i,k) entry of the equation X T = D X for(Index i=k-1 ; i>=0 ; i--) { m_matX.coeffRef(i,k) = -m_schur.matrixT().coeff(i,k); if(k-i-1>0) m_matX.coeffRef(i,k) -= (m_schur.matrixT().row(i).segment(i+1,k-i-1) * m_matX.col(k).segment(i+1,k-i-1)).value(); ComplexScalar z = m_schur.matrixT().coeff(i,i) - m_schur.matrixT().coeff(k,k); if(z==ComplexScalar(0)) { // If the i-th and k-th eigenvalue are equal, then z equals 0. // Use a small value instead, to prevent division by zero. numext::real_ref(z) = NumTraits::epsilon() * matrixnorm; } m_matX.coeffRef(i,k) = m_matX.coeff(i,k) / z; } } // Compute V as V = U X; now A = U T U^* = U X D X^(-1) U^* = V D V^(-1) m_eivec.noalias() = m_schur.matrixU() * m_matX; // .. and normalize the eigenvectors for(Index k=0 ; k void ComplexEigenSolver::sortEigenvalues(bool computeEigenvectors) { const Index n = m_eivalues.size(); for (Index i=0; i inline \ RealSchur >& \ RealSchur >::compute(const Matrix& matrix, bool computeU) \ { \ typedef Matrix MatrixType; \ typedef MatrixType::Scalar Scalar; \ typedef MatrixType::RealScalar RealScalar; \ \ eigen_assert(matrix.cols() == matrix.rows()); \ \ lapack_int n = matrix.cols(), sdim, info; \ lapack_int lda = matrix.outerStride(); \ lapack_int matrix_order = MKLCOLROW; \ char jobvs, sort='N'; \ LAPACK_##MKLPREFIX_U##_SELECT2 select = 0; \ jobvs = (computeU) ? 
'V' : 'N'; \ m_matU.resize(n, n); \ lapack_int ldvs = m_matU.outerStride(); \ m_matT = matrix; \ Matrix wr, wi; \ wr.resize(n, 1); wi.resize(n, 1); \ info = LAPACKE_##MKLPREFIX##gees( matrix_order, jobvs, sort, select, n, (MKLTYPE*)m_matT.data(), lda, &sdim, (MKLTYPE*)wr.data(), (MKLTYPE*)wi.data(), (MKLTYPE*)m_matU.data(), ldvs ); \ if(info == 0) \ m_info = Success; \ else \ m_info = NoConvergence; \ \ m_isInitialized = true; \ m_matUisUptodate = computeU; \ return *this; \ \ } EIGEN_MKL_SCHUR_REAL(double, double, d, D, ColMajor, LAPACK_COL_MAJOR) EIGEN_MKL_SCHUR_REAL(float, float, s, S, ColMajor, LAPACK_COL_MAJOR) EIGEN_MKL_SCHUR_REAL(double, double, d, D, RowMajor, LAPACK_ROW_MAJOR) EIGEN_MKL_SCHUR_REAL(float, float, s, S, RowMajor, LAPACK_ROW_MAJOR) } // end namespace Eigen #endif // EIGEN_REAL_SCHUR_MKL_H RcppEigen/inst/include/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_MKL.h0000644000176200001440000001014412253717461026044 0ustar liggesusers/* Copyright (c) 2011, Intel Corporation. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************** * Content : Eigen bindings to Intel(R) MKL * Self-adjoint eigenvalues/eigenvectors. 
******************************************************************************** */ #ifndef EIGEN_SAEIGENSOLVER_MKL_H #define EIGEN_SAEIGENSOLVER_MKL_H #include "Eigen/src/Core/util/MKL_support.h" namespace Eigen { /** \internal Specialization for the data types supported by MKL */ #define EIGEN_MKL_EIG_SELFADJ(EIGTYPE, MKLTYPE, MKLRTYPE, MKLNAME, EIGCOLROW, MKLCOLROW ) \ template<> inline \ SelfAdjointEigenSolver >& \ SelfAdjointEigenSolver >::compute(const Matrix& matrix, int options) \ { \ eigen_assert(matrix.cols() == matrix.rows()); \ eigen_assert((options&~(EigVecMask|GenEigMask))==0 \ && (options&EigVecMask)!=EigVecMask \ && "invalid option parameter"); \ bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors; \ lapack_int n = matrix.cols(), lda, matrix_order, info; \ m_eivalues.resize(n,1); \ m_subdiag.resize(n-1); \ m_eivec = matrix; \ \ if(n==1) \ { \ m_eivalues.coeffRef(0,0) = numext::real(matrix.coeff(0,0)); \ if(computeEigenvectors) m_eivec.setOnes(n,n); \ m_info = Success; \ m_isInitialized = true; \ m_eigenvectorsOk = computeEigenvectors; \ return *this; \ } \ \ lda = matrix.outerStride(); \ matrix_order=MKLCOLROW; \ char jobz, uplo='L'/*, range='A'*/; \ jobz = computeEigenvectors ? 'V' : 'N'; \ \ info = LAPACKE_##MKLNAME( matrix_order, jobz, uplo, n, (MKLTYPE*)m_eivec.data(), lda, (MKLRTYPE*)m_eivalues.data() ); \ m_info = (info==0) ? Success : NoConvergence; \ m_isInitialized = true; \ m_eigenvectorsOk = computeEigenvectors; \ return *this; \ } EIGEN_MKL_EIG_SELFADJ(double, double, double, dsyev, ColMajor, LAPACK_COL_MAJOR) EIGEN_MKL_EIG_SELFADJ(float, float, float, ssyev, ColMajor, LAPACK_COL_MAJOR) EIGEN_MKL_EIG_SELFADJ(dcomplex, MKL_Complex16, double, zheev, ColMajor, LAPACK_COL_MAJOR) EIGEN_MKL_EIG_SELFADJ(scomplex, MKL_Complex8, float, cheev, ColMajor, LAPACK_COL_MAJOR) EIGEN_MKL_EIG_SELFADJ(double, double, double, dsyev, RowMajor, LAPACK_ROW_MAJOR) EIGEN_MKL_EIG_SELFADJ(float, float, float, ssyev, RowMajor, LAPACK_ROW_MAJOR) EIGEN_MKL_EIG_SELFADJ(dcomplex, MKL_Complex16, double, zheev, RowMajor, LAPACK_ROW_MAJOR) EIGEN_MKL_EIG_SELFADJ(scomplex, MKL_Complex8, float, cheev, RowMajor, LAPACK_ROW_MAJOR) } // end namespace Eigen #endif // EIGEN_SAEIGENSOLVER_H RcppEigen/inst/include/Eigen/src/Eigenvalues/HessenbergDecomposition.h0000644000176200001440000003365612253717461025633 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud // Copyright (C) 2010 Jitse Niesen // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_HESSENBERGDECOMPOSITION_H #define EIGEN_HESSENBERGDECOMPOSITION_H namespace Eigen { namespace internal { template struct HessenbergDecompositionMatrixHReturnType; template struct traits > { typedef MatrixType ReturnType; }; } /** \eigenvalues_module \ingroup Eigenvalues_Module * * * \class HessenbergDecomposition * * \brief Reduces a square matrix to Hessenberg form by an orthogonal similarity transformation * * \tparam _MatrixType the type of the matrix of which we are computing the Hessenberg decomposition * * This class performs an Hessenberg decomposition of a matrix \f$ A \f$. In * the real case, the Hessenberg decomposition consists of an orthogonal * matrix \f$ Q \f$ and a Hessenberg matrix \f$ H \f$ such that \f$ A = Q H * Q^T \f$. 
An orthogonal matrix is a matrix whose inverse equals its * transpose (\f$ Q^{-1} = Q^T \f$). A Hessenberg matrix has zeros below the * subdiagonal, so it is almost upper triangular. The Hessenberg decomposition * of a complex matrix is \f$ A = Q H Q^* \f$ with \f$ Q \f$ unitary (that is, * \f$ Q^{-1} = Q^* \f$). * * Call the function compute() to compute the Hessenberg decomposition of a * given matrix. Alternatively, you can use the * HessenbergDecomposition(const MatrixType&) constructor which computes the * Hessenberg decomposition at construction time. Once the decomposition is * computed, you can use the matrixH() and matrixQ() functions to construct * the matrices H and Q in the decomposition. * * The documentation for matrixH() contains an example of the typical use of * this class. * * \sa class ComplexSchur, class Tridiagonalization, \ref QR_Module "QR Module" */ template class HessenbergDecomposition { public: /** \brief Synonym for the template parameter \p _MatrixType. */ typedef _MatrixType MatrixType; enum { Size = MatrixType::RowsAtCompileTime, SizeMinusOne = Size == Dynamic ? Dynamic : Size - 1, Options = MatrixType::Options, MaxSize = MatrixType::MaxRowsAtCompileTime, MaxSizeMinusOne = MaxSize == Dynamic ? Dynamic : MaxSize - 1 }; /** \brief Scalar type for matrices of type #MatrixType. */ typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Index Index; /** \brief Type for vector of Householder coefficients. * * This is column vector with entries of type #Scalar. The length of the * vector is one less than the size of #MatrixType, if it is a fixed-side * type. */ typedef Matrix CoeffVectorType; /** \brief Return type of matrixQ() */ typedef HouseholderSequence::type> HouseholderSequenceType; typedef internal::HessenbergDecompositionMatrixHReturnType MatrixHReturnType; /** \brief Default constructor; the decomposition will be computed later. * * \param [in] size The size of the matrix whose Hessenberg decomposition will be computed. * * The default constructor is useful in cases in which the user intends to * perform decompositions via compute(). The \p size parameter is only * used as a hint. It is not an error to give a wrong \p size, but it may * impair performance. * * \sa compute() for an example. */ HessenbergDecomposition(Index size = Size==Dynamic ? 2 : Size) : m_matrix(size,size), m_temp(size), m_isInitialized(false) { if(size>1) m_hCoeffs.resize(size-1); } /** \brief Constructor; computes Hessenberg decomposition of given matrix. * * \param[in] matrix Square matrix whose Hessenberg decomposition is to be computed. * * This constructor calls compute() to compute the Hessenberg * decomposition. * * \sa matrixH() for an example. */ HessenbergDecomposition(const MatrixType& matrix) : m_matrix(matrix), m_temp(matrix.rows()), m_isInitialized(false) { if(matrix.rows()<2) { m_isInitialized = true; return; } m_hCoeffs.resize(matrix.rows()-1,1); _compute(m_matrix, m_hCoeffs, m_temp); m_isInitialized = true; } /** \brief Computes Hessenberg decomposition of given matrix. * * \param[in] matrix Square matrix whose Hessenberg decomposition is to be computed. * \returns Reference to \c *this * * The Hessenberg decomposition is computed by bringing the columns of the * matrix successively in the required form using Householder reflections * (see, e.g., Algorithm 7.4.2 in Golub \& Van Loan, %Matrix * Computations). The cost is \f$ 10n^3/3 \f$ flops, where \f$ n \f$ * denotes the size of the given matrix. 
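 *
 * A minimal sketch of the intended call pattern (the 5x5 random input is an
 * assumption for illustration):
 * \code
 * MatrixXd A = MatrixXd::Random(5,5);
 * HessenbergDecomposition<MatrixXd> hd(5);   // preallocate for size-5 problems
 * hd.compute(A);                             // A = Q H Q^T
 * MatrixXd H = hd.matrixH();                 // Hessenberg factor as a plain matrix
 * \endcode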
* * This method reuses of the allocated data in the HessenbergDecomposition * object. * * Example: \include HessenbergDecomposition_compute.cpp * Output: \verbinclude HessenbergDecomposition_compute.out */ HessenbergDecomposition& compute(const MatrixType& matrix) { m_matrix = matrix; if(matrix.rows()<2) { m_isInitialized = true; return *this; } m_hCoeffs.resize(matrix.rows()-1,1); _compute(m_matrix, m_hCoeffs, m_temp); m_isInitialized = true; return *this; } /** \brief Returns the Householder coefficients. * * \returns a const reference to the vector of Householder coefficients * * \pre Either the constructor HessenbergDecomposition(const MatrixType&) * or the member function compute(const MatrixType&) has been called * before to compute the Hessenberg decomposition of a matrix. * * The Householder coefficients allow the reconstruction of the matrix * \f$ Q \f$ in the Hessenberg decomposition from the packed data. * * \sa packedMatrix(), \ref Householder_Module "Householder module" */ const CoeffVectorType& householderCoefficients() const { eigen_assert(m_isInitialized && "HessenbergDecomposition is not initialized."); return m_hCoeffs; } /** \brief Returns the internal representation of the decomposition * * \returns a const reference to a matrix with the internal representation * of the decomposition. * * \pre Either the constructor HessenbergDecomposition(const MatrixType&) * or the member function compute(const MatrixType&) has been called * before to compute the Hessenberg decomposition of a matrix. * * The returned matrix contains the following information: * - the upper part and lower sub-diagonal represent the Hessenberg matrix H * - the rest of the lower part contains the Householder vectors that, combined with * Householder coefficients returned by householderCoefficients(), * allows to reconstruct the matrix Q as * \f$ Q = H_{N-1} \ldots H_1 H_0 \f$. * Here, the matrices \f$ H_i \f$ are the Householder transformations * \f$ H_i = (I - h_i v_i v_i^T) \f$ * where \f$ h_i \f$ is the \f$ i \f$th Householder coefficient and * \f$ v_i \f$ is the Householder vector defined by * \f$ v_i = [ 0, \ldots, 0, 1, M(i+2,i), \ldots, M(N-1,i) ]^T \f$ * with M the matrix returned by this function. * * See LAPACK for further details on this packed storage. * * Example: \include HessenbergDecomposition_packedMatrix.cpp * Output: \verbinclude HessenbergDecomposition_packedMatrix.out * * \sa householderCoefficients() */ const MatrixType& packedMatrix() const { eigen_assert(m_isInitialized && "HessenbergDecomposition is not initialized."); return m_matrix; } /** \brief Reconstructs the orthogonal matrix Q in the decomposition * * \returns object representing the matrix Q * * \pre Either the constructor HessenbergDecomposition(const MatrixType&) * or the member function compute(const MatrixType&) has been called * before to compute the Hessenberg decomposition of a matrix. * * This function returns a light-weight object of template class * HouseholderSequence. You can either apply it directly to a matrix or * you can convert it to a matrix of type #MatrixType. 
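 *
 * For instance (an illustrative sketch; \c hd and \c A are assumed to be a
 * computed decomposition and its input matrix):
 * \code
 * MatrixXd Q  = hd.matrixQ();        // convert to an explicit matrix
 * MatrixXd QA = hd.matrixQ() * A;    // or apply the sequence directly
 * \endcode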
* * \sa matrixH() for an example, class HouseholderSequence */ HouseholderSequenceType matrixQ() const { eigen_assert(m_isInitialized && "HessenbergDecomposition is not initialized."); return HouseholderSequenceType(m_matrix, m_hCoeffs.conjugate()) .setLength(m_matrix.rows() - 1) .setShift(1); } /** \brief Constructs the Hessenberg matrix H in the decomposition * * \returns expression object representing the matrix H * * \pre Either the constructor HessenbergDecomposition(const MatrixType&) * or the member function compute(const MatrixType&) has been called * before to compute the Hessenberg decomposition of a matrix. * * The object returned by this function constructs the Hessenberg matrix H * when it is assigned to a matrix or otherwise evaluated. The matrix H is * constructed from the packed matrix as returned by packedMatrix(): The * upper part (including the subdiagonal) of the packed matrix contains * the matrix H. It may sometimes be better to directly use the packed * matrix instead of constructing the matrix H. * * Example: \include HessenbergDecomposition_matrixH.cpp * Output: \verbinclude HessenbergDecomposition_matrixH.out * * \sa matrixQ(), packedMatrix() */ MatrixHReturnType matrixH() const { eigen_assert(m_isInitialized && "HessenbergDecomposition is not initialized."); return MatrixHReturnType(*this); } private: typedef Matrix VectorType; typedef typename NumTraits::Real RealScalar; static void _compute(MatrixType& matA, CoeffVectorType& hCoeffs, VectorType& temp); protected: MatrixType m_matrix; CoeffVectorType m_hCoeffs; VectorType m_temp; bool m_isInitialized; }; /** \internal * Performs a tridiagonal decomposition of \a matA in place. * * \param matA the input selfadjoint matrix * \param hCoeffs returned Householder coefficients * * The result is written in the lower triangular part of \a matA. * * Implemented from Golub's "%Matrix Computations", algorithm 8.3.1. * * \sa packedMatrix() */ template void HessenbergDecomposition::_compute(MatrixType& matA, CoeffVectorType& hCoeffs, VectorType& temp) { eigen_assert(matA.rows()==matA.cols()); Index n = matA.rows(); temp.resize(n); for (Index i = 0; i struct HessenbergDecompositionMatrixHReturnType : public ReturnByValue > { typedef typename MatrixType::Index Index; public: /** \brief Constructor. * * \param[in] hess Hessenberg decomposition */ HessenbergDecompositionMatrixHReturnType(const HessenbergDecomposition& hess) : m_hess(hess) { } /** \brief Hessenberg matrix in decomposition. * * \param[out] result Hessenberg matrix in decomposition \p hess which * was passed to the constructor */ template inline void evalTo(ResultType& result) const { result = m_hess.packedMatrix(); Index n = result.rows(); if (n>2) result.bottomLeftCorner(n-2, n-2).template triangularView().setZero(); } Index rows() const { return m_hess.packedMatrix().rows(); } Index cols() const { return m_hess.packedMatrix().cols(); } protected: const HessenbergDecomposition& m_hess; }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_HESSENBERGDECOMPOSITION_H RcppEigen/inst/include/Eigen/src/Eigenvalues/ComplexSchur.h0000644000176200001440000004067512253717461023424 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Claire Maurice // Copyright (C) 2009 Gael Guennebaud // Copyright (C) 2010,2012 Jitse Niesen // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. 
If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_COMPLEX_SCHUR_H #define EIGEN_COMPLEX_SCHUR_H #include "./HessenbergDecomposition.h" namespace Eigen { namespace internal { template struct complex_schur_reduce_to_hessenberg; } /** \eigenvalues_module \ingroup Eigenvalues_Module * * * \class ComplexSchur * * \brief Performs a complex Schur decomposition of a real or complex square matrix * * \tparam _MatrixType the type of the matrix of which we are * computing the Schur decomposition; this is expected to be an * instantiation of the Matrix class template. * * Given a real or complex square matrix A, this class computes the * Schur decomposition: \f$ A = U T U^*\f$ where U is a unitary * complex matrix, and T is a complex upper triangular matrix. The * diagonal of the matrix T corresponds to the eigenvalues of the * matrix A. * * Call the function compute() to compute the Schur decomposition of * a given matrix. Alternatively, you can use the * ComplexSchur(const MatrixType&, bool) constructor which computes * the Schur decomposition at construction time. Once the * decomposition is computed, you can use the matrixU() and matrixT() * functions to retrieve the matrices U and V in the decomposition. * * \note This code is inspired from Jampack * * \sa class RealSchur, class EigenSolver, class ComplexEigenSolver */ template class ComplexSchur { public: typedef _MatrixType MatrixType; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, Options = MatrixType::Options, MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime }; /** \brief Scalar type for matrices of type \p _MatrixType. */ typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef typename MatrixType::Index Index; /** \brief Complex scalar type for \p _MatrixType. * * This is \c std::complex if #Scalar is real (e.g., * \c float or \c double) and just \c Scalar if #Scalar is * complex. */ typedef std::complex ComplexScalar; /** \brief Type for the matrices in the Schur decomposition. * * This is a square matrix with entries of type #ComplexScalar. * The size is the same as the size of \p _MatrixType. */ typedef Matrix ComplexMatrixType; /** \brief Default constructor. * * \param [in] size Positive integer, size of the matrix whose Schur decomposition will be computed. * * The default constructor is useful in cases in which the user * intends to perform decompositions via compute(). The \p size * parameter is only used as a hint. It is not an error to give a * wrong \p size, but it may impair performance. * * \sa compute() for an example. */ ComplexSchur(Index size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime) : m_matT(size,size), m_matU(size,size), m_hess(size), m_isInitialized(false), m_matUisUptodate(false), m_maxIters(-1) {} /** \brief Constructor; computes Schur decomposition of given matrix. * * \param[in] matrix Square matrix whose Schur decomposition is to be computed. * \param[in] computeU If true, both T and U are computed; if false, only T is computed. * * This constructor calls compute() to compute the Schur decomposition. * * \sa matrixT() and matrixU() for examples. 
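 *
 * A short usage sketch (the random input matrix is illustrative only):
 * \code
 * MatrixXcf A = MatrixXcf::Random(4,4);
 * ComplexSchur<MatrixXcf> schur(A);   // computes T and U
 * if (schur.info() == Success) {
 *   // A is recovered, up to rounding, as U T U^*
 *   MatrixXcf R = schur.matrixU() * schur.matrixT() * schur.matrixU().adjoint();
 * }
 * \endcode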
*/ ComplexSchur(const MatrixType& matrix, bool computeU = true) : m_matT(matrix.rows(),matrix.cols()), m_matU(matrix.rows(),matrix.cols()), m_hess(matrix.rows()), m_isInitialized(false), m_matUisUptodate(false), m_maxIters(-1) { compute(matrix, computeU); } /** \brief Returns the unitary matrix in the Schur decomposition. * * \returns A const reference to the matrix U. * * It is assumed that either the constructor * ComplexSchur(const MatrixType& matrix, bool computeU) or the * member function compute(const MatrixType& matrix, bool computeU) * has been called before to compute the Schur decomposition of a * matrix, and that \p computeU was set to true (the default * value). * * Example: \include ComplexSchur_matrixU.cpp * Output: \verbinclude ComplexSchur_matrixU.out */ const ComplexMatrixType& matrixU() const { eigen_assert(m_isInitialized && "ComplexSchur is not initialized."); eigen_assert(m_matUisUptodate && "The matrix U has not been computed during the ComplexSchur decomposition."); return m_matU; } /** \brief Returns the triangular matrix in the Schur decomposition. * * \returns A const reference to the matrix T. * * It is assumed that either the constructor * ComplexSchur(const MatrixType& matrix, bool computeU) or the * member function compute(const MatrixType& matrix, bool computeU) * has been called before to compute the Schur decomposition of a * matrix. * * Note that this function returns a plain square matrix. If you want to reference * only the upper triangular part, use: * \code schur.matrixT().triangularView() \endcode * * Example: \include ComplexSchur_matrixT.cpp * Output: \verbinclude ComplexSchur_matrixT.out */ const ComplexMatrixType& matrixT() const { eigen_assert(m_isInitialized && "ComplexSchur is not initialized."); return m_matT; } /** \brief Computes Schur decomposition of given matrix. * * \param[in] matrix Square matrix whose Schur decomposition is to be computed. * \param[in] computeU If true, both T and U are computed; if false, only T is computed. * \returns Reference to \c *this * * The Schur decomposition is computed by first reducing the * matrix to Hessenberg form using the class * HessenbergDecomposition. The Hessenberg matrix is then reduced * to triangular form by performing QR iterations with a single * shift. The cost of computing the Schur decomposition depends * on the number of iterations; as a rough guide, it may be taken * on the number of iterations; as a rough guide, it may be taken * to be \f$25n^3\f$ complex flops, or \f$10n^3\f$ complex flops * if \a computeU is false. * * Example: \include ComplexSchur_compute.cpp * Output: \verbinclude ComplexSchur_compute.out * * \sa compute(const MatrixType&, bool, Index) */ ComplexSchur& compute(const MatrixType& matrix, bool computeU = true); /** \brief Compute Schur decomposition from a given Hessenberg matrix * \param[in] matrixH Matrix in Hessenberg form H * \param[in] matrixQ orthogonal matrix Q that transform a matrix A to H : A = Q H Q^T * \param computeU Computes the matriX U of the Schur vectors * \return Reference to \c *this * * This routine assumes that the matrix is already reduced in Hessenberg form matrixH * using either the class HessenbergDecomposition or another mean. 
* It computes the upper quasi-triangular matrix T of the Schur decomposition of H * When computeU is true, this routine computes the matrix U such that * A = U T U^T = (QZ) T (QZ)^T = Q H Q^T where A is the initial matrix * * NOTE Q is referenced if computeU is true; so, if the initial orthogonal matrix * is not available, the user should give an identity matrix (Q.setIdentity()) * * \sa compute(const MatrixType&, bool) */ template ComplexSchur& computeFromHessenberg(const HessMatrixType& matrixH, const OrthMatrixType& matrixQ, bool computeU=true); /** \brief Reports whether previous computation was successful. * * \returns \c Success if computation was succesful, \c NoConvergence otherwise. */ ComputationInfo info() const { eigen_assert(m_isInitialized && "ComplexSchur is not initialized."); return m_info; } /** \brief Sets the maximum number of iterations allowed. * * If not specified by the user, the maximum number of iterations is m_maxIterationsPerRow times the size * of the matrix. */ ComplexSchur& setMaxIterations(Index maxIters) { m_maxIters = maxIters; return *this; } /** \brief Returns the maximum number of iterations. */ Index getMaxIterations() { return m_maxIters; } /** \brief Maximum number of iterations per row. * * If not otherwise specified, the maximum number of iterations is this number times the size of the * matrix. It is currently set to 30. */ static const int m_maxIterationsPerRow = 30; protected: ComplexMatrixType m_matT, m_matU; HessenbergDecomposition m_hess; ComputationInfo m_info; bool m_isInitialized; bool m_matUisUptodate; Index m_maxIters; private: bool subdiagonalEntryIsNeglegible(Index i); ComplexScalar computeShift(Index iu, Index iter); void reduceToTriangularForm(bool computeU); friend struct internal::complex_schur_reduce_to_hessenberg::IsComplex>; }; /** If m_matT(i+1,i) is neglegible in floating point arithmetic * compared to m_matT(i,i) and m_matT(j,j), then set it to zero and * return true, else return false. */ template inline bool ComplexSchur::subdiagonalEntryIsNeglegible(Index i) { RealScalar d = numext::norm1(m_matT.coeff(i,i)) + numext::norm1(m_matT.coeff(i+1,i+1)); RealScalar sd = numext::norm1(m_matT.coeff(i+1,i)); if (internal::isMuchSmallerThan(sd, d, NumTraits::epsilon())) { m_matT.coeffRef(i+1,i) = ComplexScalar(0); return true; } return false; } /** Compute the shift in the current QR iteration. 
*/ template typename ComplexSchur::ComplexScalar ComplexSchur::computeShift(Index iu, Index iter) { using std::abs; if (iter == 10 || iter == 20) { // exceptional shift, taken from http://www.netlib.org/eispack/comqr.f return abs(numext::real(m_matT.coeff(iu,iu-1))) + abs(numext::real(m_matT.coeff(iu-1,iu-2))); } // compute the shift as one of the eigenvalues of t, the 2x2 // diagonal block on the bottom of the active submatrix Matrix t = m_matT.template block<2,2>(iu-1,iu-1); RealScalar normt = t.cwiseAbs().sum(); t /= normt; // the normalization by sf is to avoid under/overflow ComplexScalar b = t.coeff(0,1) * t.coeff(1,0); ComplexScalar c = t.coeff(0,0) - t.coeff(1,1); ComplexScalar disc = sqrt(c*c + RealScalar(4)*b); ComplexScalar det = t.coeff(0,0) * t.coeff(1,1) - b; ComplexScalar trace = t.coeff(0,0) + t.coeff(1,1); ComplexScalar eival1 = (trace + disc) / RealScalar(2); ComplexScalar eival2 = (trace - disc) / RealScalar(2); if(numext::norm1(eival1) > numext::norm1(eival2)) eival2 = det / eival1; else eival1 = det / eival2; // choose the eigenvalue closest to the bottom entry of the diagonal if(numext::norm1(eival1-t.coeff(1,1)) < numext::norm1(eival2-t.coeff(1,1))) return normt * eival1; else return normt * eival2; } template ComplexSchur& ComplexSchur::compute(const MatrixType& matrix, bool computeU) { m_matUisUptodate = false; eigen_assert(matrix.cols() == matrix.rows()); if(matrix.cols() == 1) { m_matT = matrix.template cast(); if(computeU) m_matU = ComplexMatrixType::Identity(1,1); m_info = Success; m_isInitialized = true; m_matUisUptodate = computeU; return *this; } internal::complex_schur_reduce_to_hessenberg::IsComplex>::run(*this, matrix, computeU); computeFromHessenberg(m_matT, m_matU, computeU); return *this; } template template ComplexSchur& ComplexSchur::computeFromHessenberg(const HessMatrixType& matrixH, const OrthMatrixType& matrixQ, bool computeU) { m_matT = matrixH; if(computeU) m_matU = matrixQ; reduceToTriangularForm(computeU); return *this; } namespace internal { /* Reduce given matrix to Hessenberg form */ template struct complex_schur_reduce_to_hessenberg { // this is the implementation for the case IsComplex = true static void run(ComplexSchur& _this, const MatrixType& matrix, bool computeU) { _this.m_hess.compute(matrix); _this.m_matT = _this.m_hess.matrixH(); if(computeU) _this.m_matU = _this.m_hess.matrixQ(); } }; template struct complex_schur_reduce_to_hessenberg { static void run(ComplexSchur& _this, const MatrixType& matrix, bool computeU) { typedef typename ComplexSchur::ComplexScalar ComplexScalar; // Note: m_hess is over RealScalar; m_matT and m_matU is over ComplexScalar _this.m_hess.compute(matrix); _this.m_matT = _this.m_hess.matrixH().template cast(); if(computeU) { // This may cause an allocation which seems to be avoidable MatrixType Q = _this.m_hess.matrixQ(); _this.m_matU = Q.template cast(); } } }; } // end namespace internal // Reduce the Hessenberg matrix m_matT to triangular form by QR iteration. template void ComplexSchur::reduceToTriangularForm(bool computeU) { Index maxIters = m_maxIters; if (maxIters == -1) maxIters = m_maxIterationsPerRow * m_matT.rows(); // The matrix m_matT is divided in three parts. // Rows 0,...,il-1 are decoupled from the rest because m_matT(il,il-1) is zero. // Rows il,...,iu is the part we are working on (the active submatrix). // Rows iu+1,...,end are already brought in triangular form. 
Index iu = m_matT.cols() - 1; Index il; Index iter = 0; // number of iterations we are working on the (iu,iu) element Index totalIter = 0; // number of iterations for whole matrix while(true) { // find iu, the bottom row of the active submatrix while(iu > 0) { if(!subdiagonalEntryIsNeglegible(iu-1)) break; iter = 0; --iu; } // if iu is zero then we are done; the whole matrix is triangularized if(iu==0) break; // if we spent too many iterations, we give up iter++; totalIter++; if(totalIter > maxIters) break; // find il, the top row of the active submatrix il = iu-1; while(il > 0 && !subdiagonalEntryIsNeglegible(il-1)) { --il; } /* perform the QR step using Givens rotations. The first rotation creates a bulge; the (il+2,il) element becomes nonzero. This bulge is chased down to the bottom of the active submatrix. */ ComplexScalar shift = computeShift(iu, iter); JacobiRotation rot; rot.makeGivens(m_matT.coeff(il,il) - shift, m_matT.coeff(il+1,il)); m_matT.rightCols(m_matT.cols()-il).applyOnTheLeft(il, il+1, rot.adjoint()); m_matT.topRows((std::min)(il+2,iu)+1).applyOnTheRight(il, il+1, rot); if(computeU) m_matU.applyOnTheRight(il, il+1, rot); for(Index i=il+1 ; i // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_REAL_QZ_H #define EIGEN_REAL_QZ_H namespace Eigen { /** \eigenvalues_module \ingroup Eigenvalues_Module * * * \class RealQZ * * \brief Performs a real QZ decomposition of a pair of square matrices * * \tparam _MatrixType the type of the matrix of which we are computing the * real QZ decomposition; this is expected to be an instantiation of the * Matrix class template. * * Given a real square matrices A and B, this class computes the real QZ * decomposition: \f$ A = Q S Z \f$, \f$ B = Q T Z \f$ where Q and Z are * real orthogonal matrixes, T is upper-triangular matrix, and S is upper * quasi-triangular matrix. An orthogonal matrix is a matrix whose * inverse is equal to its transpose, \f$ U^{-1} = U^T \f$. A quasi-triangular * matrix is a block-triangular matrix whose diagonal consists of 1-by-1 * blocks and 2-by-2 blocks where further reduction is impossible due to * complex eigenvalues. * * The eigenvalues of the pencil \f$ A - z B \f$ can be obtained from * 1x1 and 2x2 blocks on the diagonals of S and T. * * Call the function compute() to compute the real QZ decomposition of a * given pair of matrices. Alternatively, you can use the * RealQZ(const MatrixType& B, const MatrixType& B, bool computeQZ) * constructor which computes the real QZ decomposition at construction * time. Once the decomposition is computed, you can use the matrixS(), * matrixT(), matrixQ() and matrixZ() functions to retrieve the matrices * S, T, Q and Z in the decomposition. If computeQZ==false, some time * is saved by not computing matrices Q and Z. * * Example: \include RealQZ_compute.cpp * Output: \include RealQZ_compute.out * * \note The implementation is based on the algorithm in "Matrix Computations" * by Gene H. Golub and Charles F. Van Loan, and a paper "An algorithm for * generalized eigenvalue problems" by C.B.Moler and G.W.Stewart. 
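 *
 * As a sketch of what is computed (the input matrices are assumed random
 * 4x4 examples):
 * \code
 * MatrixXd A = MatrixXd::Random(4,4), B = MatrixXd::Random(4,4);
 * RealQZ<MatrixXd> qz(A, B);          // A = Q S Z,  B = Q T Z
 * if (qz.info() == Success) {
 *   MatrixXd S = qz.matrixS(), T = qz.matrixT();
 *   MatrixXd Q = qz.matrixQ(), Z = qz.matrixZ();
 * }
 * \endcode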
* * \sa class RealSchur, class ComplexSchur, class EigenSolver, class ComplexEigenSolver */ template class RealQZ { public: typedef _MatrixType MatrixType; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, Options = MatrixType::Options, MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime }; typedef typename MatrixType::Scalar Scalar; typedef std::complex::Real> ComplexScalar; typedef typename MatrixType::Index Index; typedef Matrix EigenvalueType; typedef Matrix ColumnVectorType; /** \brief Default constructor. * * \param [in] size Positive integer, size of the matrix whose QZ decomposition will be computed. * * The default constructor is useful in cases in which the user intends to * perform decompositions via compute(). The \p size parameter is only * used as a hint. It is not an error to give a wrong \p size, but it may * impair performance. * * \sa compute() for an example. */ RealQZ(Index size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime) : m_S(size, size), m_T(size, size), m_Q(size, size), m_Z(size, size), m_workspace(size*2), m_maxIters(400), m_isInitialized(false) { } /** \brief Constructor; computes real QZ decomposition of given matrices * * \param[in] A Matrix A. * \param[in] B Matrix B. * \param[in] computeQZ If false, A and Z are not computed. * * This constructor calls compute() to compute the QZ decomposition. */ RealQZ(const MatrixType& A, const MatrixType& B, bool computeQZ = true) : m_S(A.rows(),A.cols()), m_T(A.rows(),A.cols()), m_Q(A.rows(),A.cols()), m_Z(A.rows(),A.cols()), m_workspace(A.rows()*2), m_maxIters(400), m_isInitialized(false) { compute(A, B, computeQZ); } /** \brief Returns matrix Q in the QZ decomposition. * * \returns A const reference to the matrix Q. */ const MatrixType& matrixQ() const { eigen_assert(m_isInitialized && "RealQZ is not initialized."); eigen_assert(m_computeQZ && "The matrices Q and Z have not been computed during the QZ decomposition."); return m_Q; } /** \brief Returns matrix Z in the QZ decomposition. * * \returns A const reference to the matrix Z. */ const MatrixType& matrixZ() const { eigen_assert(m_isInitialized && "RealQZ is not initialized."); eigen_assert(m_computeQZ && "The matrices Q and Z have not been computed during the QZ decomposition."); return m_Z; } /** \brief Returns matrix S in the QZ decomposition. * * \returns A const reference to the matrix S. */ const MatrixType& matrixS() const { eigen_assert(m_isInitialized && "RealQZ is not initialized."); return m_S; } /** \brief Returns matrix S in the QZ decomposition. * * \returns A const reference to the matrix S. */ const MatrixType& matrixT() const { eigen_assert(m_isInitialized && "RealQZ is not initialized."); return m_T; } /** \brief Computes QZ decomposition of given matrix. * * \param[in] A Matrix A. * \param[in] B Matrix B. * \param[in] computeQZ If false, A and Z are not computed. * \returns Reference to \c *this */ RealQZ& compute(const MatrixType& A, const MatrixType& B, bool computeQZ = true); /** \brief Reports whether previous computation was successful. * * \returns \c Success if computation was succesful, \c NoConvergence otherwise. */ ComputationInfo info() const { eigen_assert(m_isInitialized && "RealQZ is not initialized."); return m_info; } /** \brief Returns number of performed QR-like iterations. 
*/ Index iterations() const { eigen_assert(m_isInitialized && "RealQZ is not initialized."); return m_global_iter; } /** Sets the maximal number of iterations allowed to converge to one eigenvalue * or decouple the problem. */ RealQZ& setMaxIterations(Index maxIters) { m_maxIters = maxIters; return *this; } private: MatrixType m_S, m_T, m_Q, m_Z; Matrix m_workspace; ComputationInfo m_info; Index m_maxIters; bool m_isInitialized; bool m_computeQZ; Scalar m_normOfT, m_normOfS; Index m_global_iter; typedef Matrix Vector3s; typedef Matrix Vector2s; typedef Matrix Matrix2s; typedef JacobiRotation JRs; void hessenbergTriangular(); void computeNorms(); Index findSmallSubdiagEntry(Index iu); Index findSmallDiagEntry(Index f, Index l); void splitOffTwoRows(Index i); void pushDownZero(Index z, Index f, Index l); void step(Index f, Index l, Index iter); }; // RealQZ /** \internal Reduces S and T to upper Hessenberg - triangular form */ template void RealQZ::hessenbergTriangular() { const Index dim = m_S.cols(); // perform QR decomposition of T, overwrite T with R, save Q HouseholderQR qrT(m_T); m_T = qrT.matrixQR(); m_T.template triangularView().setZero(); m_Q = qrT.householderQ(); // overwrite S with Q* S m_S.applyOnTheLeft(m_Q.adjoint()); // init Z as Identity if (m_computeQZ) m_Z = MatrixType::Identity(dim,dim); // reduce S to upper Hessenberg with Givens rotations for (Index j=0; j<=dim-3; j++) { for (Index i=dim-1; i>=j+2; i--) { JRs G; // kill S(i,j) if(m_S.coeff(i,j) != 0) { G.makeGivens(m_S.coeff(i-1,j), m_S.coeff(i,j), &m_S.coeffRef(i-1, j)); m_S.coeffRef(i,j) = Scalar(0.0); m_S.rightCols(dim-j-1).applyOnTheLeft(i-1,i,G.adjoint()); m_T.rightCols(dim-i+1).applyOnTheLeft(i-1,i,G.adjoint()); // update Q if (m_computeQZ) m_Q.applyOnTheRight(i-1,i,G); } // kill T(i,i-1) if(m_T.coeff(i,i-1)!=Scalar(0)) { G.makeGivens(m_T.coeff(i,i), m_T.coeff(i,i-1), &m_T.coeffRef(i,i)); m_T.coeffRef(i,i-1) = Scalar(0.0); m_S.applyOnTheRight(i,i-1,G); m_T.topRows(i).applyOnTheRight(i,i-1,G); // update Z if (m_computeQZ) m_Z.applyOnTheLeft(i,i-1,G.adjoint()); } } } } /** \internal Computes vector L1 norms of S and T when in Hessenberg-Triangular form already */ template inline void RealQZ::computeNorms() { const Index size = m_S.cols(); m_normOfS = Scalar(0.0); m_normOfT = Scalar(0.0); for (Index j = 0; j < size; ++j) { m_normOfS += m_S.col(j).segment(0, (std::min)(size,j+2)).cwiseAbs().sum(); m_normOfT += m_T.row(j).segment(j, size - j).cwiseAbs().sum(); } } /** \internal Look for single small sub-diagonal element S(res, res-1) and return res (or 0) */ template inline typename MatrixType::Index RealQZ::findSmallSubdiagEntry(Index iu) { using std::abs; Index res = iu; while (res > 0) { Scalar s = abs(m_S.coeff(res-1,res-1)) + abs(m_S.coeff(res,res)); if (s == Scalar(0.0)) s = m_normOfS; if (abs(m_S.coeff(res,res-1)) < NumTraits::epsilon() * s) break; res--; } return res; } /** \internal Look for single small diagonal element T(res, res) for res between f and l, and return res (or f-1) */ template inline typename MatrixType::Index RealQZ::findSmallDiagEntry(Index f, Index l) { using std::abs; Index res = l; while (res >= f) { if (abs(m_T.coeff(res,res)) <= NumTraits::epsilon() * m_normOfT) break; res--; } return res; } /** \internal decouple 2x2 diagonal block in rows i, i+1 if eigenvalues are real */ template inline void RealQZ::splitOffTwoRows(Index i) { using std::abs; using std::sqrt; const Index dim=m_S.cols(); if (abs(m_S.coeff(i+1,i))==Scalar(0)) return; Index z = findSmallDiagEntry(i,i+1); if (z==i-1) { // 
block of (S T^{-1}) Matrix2s STi = m_T.template block<2,2>(i,i).template triangularView(). template solve(m_S.template block<2,2>(i,i)); Scalar p = Scalar(0.5)*(STi(0,0)-STi(1,1)); Scalar q = p*p + STi(1,0)*STi(0,1); if (q>=0) { Scalar z = sqrt(q); // one QR-like iteration for ABi - lambda I // is enough - when we know exact eigenvalue in advance, // convergence is immediate JRs G; if (p>=0) G.makeGivens(p + z, STi(1,0)); else G.makeGivens(p - z, STi(1,0)); m_S.rightCols(dim-i).applyOnTheLeft(i,i+1,G.adjoint()); m_T.rightCols(dim-i).applyOnTheLeft(i,i+1,G.adjoint()); // update Q if (m_computeQZ) m_Q.applyOnTheRight(i,i+1,G); G.makeGivens(m_T.coeff(i+1,i+1), m_T.coeff(i+1,i)); m_S.topRows(i+2).applyOnTheRight(i+1,i,G); m_T.topRows(i+2).applyOnTheRight(i+1,i,G); // update Z if (m_computeQZ) m_Z.applyOnTheLeft(i+1,i,G.adjoint()); m_S.coeffRef(i+1,i) = Scalar(0.0); m_T.coeffRef(i+1,i) = Scalar(0.0); } } else { pushDownZero(z,i,i+1); } } /** \internal use zero in T(z,z) to zero S(l,l-1), working in block f..l */ template inline void RealQZ::pushDownZero(Index z, Index f, Index l) { JRs G; const Index dim = m_S.cols(); for (Index zz=z; zzf ? (zz-1) : zz; G.makeGivens(m_T.coeff(zz, zz+1), m_T.coeff(zz+1, zz+1)); m_S.rightCols(dim-firstColS).applyOnTheLeft(zz,zz+1,G.adjoint()); m_T.rightCols(dim-zz).applyOnTheLeft(zz,zz+1,G.adjoint()); m_T.coeffRef(zz+1,zz+1) = Scalar(0.0); // update Q if (m_computeQZ) m_Q.applyOnTheRight(zz,zz+1,G); // kill S(zz+1, zz-1) if (zz>f) { G.makeGivens(m_S.coeff(zz+1, zz), m_S.coeff(zz+1,zz-1)); m_S.topRows(zz+2).applyOnTheRight(zz, zz-1,G); m_T.topRows(zz+1).applyOnTheRight(zz, zz-1,G); m_S.coeffRef(zz+1,zz-1) = Scalar(0.0); // update Z if (m_computeQZ) m_Z.applyOnTheLeft(zz,zz-1,G.adjoint()); } } // finally kill S(l,l-1) G.makeGivens(m_S.coeff(l,l), m_S.coeff(l,l-1)); m_S.applyOnTheRight(l,l-1,G); m_T.applyOnTheRight(l,l-1,G); m_S.coeffRef(l,l-1)=Scalar(0.0); // update Z if (m_computeQZ) m_Z.applyOnTheLeft(l,l-1,G.adjoint()); } /** \internal QR-like iterative step for block f..l */ template inline void RealQZ::step(Index f, Index l, Index iter) { using std::abs; const Index dim = m_S.cols(); // x, y, z Scalar x, y, z; if (iter==10) { // Wilkinson ad hoc shift const Scalar a11=m_S.coeff(f+0,f+0), a12=m_S.coeff(f+0,f+1), a21=m_S.coeff(f+1,f+0), a22=m_S.coeff(f+1,f+1), a32=m_S.coeff(f+2,f+1), b12=m_T.coeff(f+0,f+1), b11i=Scalar(1.0)/m_T.coeff(f+0,f+0), b22i=Scalar(1.0)/m_T.coeff(f+1,f+1), a87=m_S.coeff(l-1,l-2), a98=m_S.coeff(l-0,l-1), b77i=Scalar(1.0)/m_T.coeff(l-2,l-2), b88i=Scalar(1.0)/m_T.coeff(l-1,l-1); Scalar ss = abs(a87*b77i) + abs(a98*b88i), lpl = Scalar(1.5)*ss, ll = ss*ss; x = ll + a11*a11*b11i*b11i - lpl*a11*b11i + a12*a21*b11i*b22i - a11*a21*b12*b11i*b11i*b22i; y = a11*a21*b11i*b11i - lpl*a21*b11i + a21*a22*b11i*b22i - a21*a21*b12*b11i*b11i*b22i; z = a21*a32*b11i*b22i; } else if (iter==16) { // another exceptional shift x = m_S.coeff(f,f)/m_T.coeff(f,f)-m_S.coeff(l,l)/m_T.coeff(l,l) + m_S.coeff(l,l-1)*m_T.coeff(l-1,l) / (m_T.coeff(l-1,l-1)*m_T.coeff(l,l)); y = m_S.coeff(f+1,f)/m_T.coeff(f,f); z = 0; } else if (iter>23 && !(iter%8)) { // extremely exceptional shift x = internal::random(-1.0,1.0); y = internal::random(-1.0,1.0); z = internal::random(-1.0,1.0); } else { // Compute the shifts: (x,y,z,0...) = (AB^-1 - l1 I) (AB^-1 - l2 I) e1 // where l1 and l2 are the eigenvalues of the 2x2 matrix C = U V^-1 where // U and V are 2x2 bottom right sub matrices of A and B. 
Thus: // = AB^-1AB^-1 + l1 l2 I - (l1+l2)(AB^-1) // = AB^-1AB^-1 + det(M) - tr(M)(AB^-1) // Since we are only interested in having x, y, z with a correct ratio, we have: const Scalar a11 = m_S.coeff(f,f), a12 = m_S.coeff(f,f+1), a21 = m_S.coeff(f+1,f), a22 = m_S.coeff(f+1,f+1), a32 = m_S.coeff(f+2,f+1), a88 = m_S.coeff(l-1,l-1), a89 = m_S.coeff(l-1,l), a98 = m_S.coeff(l,l-1), a99 = m_S.coeff(l,l), b11 = m_T.coeff(f,f), b12 = m_T.coeff(f,f+1), b22 = m_T.coeff(f+1,f+1), b88 = m_T.coeff(l-1,l-1), b89 = m_T.coeff(l-1,l), b99 = m_T.coeff(l,l); x = ( (a88/b88 - a11/b11)*(a99/b99 - a11/b11) - (a89/b99)*(a98/b88) + (a98/b88)*(b89/b99)*(a11/b11) ) * (b11/a21) + a12/b22 - (a11/b11)*(b12/b22); y = (a22/b22-a11/b11) - (a21/b11)*(b12/b22) - (a88/b88-a11/b11) - (a99/b99-a11/b11) + (a98/b88)*(b89/b99); z = a32/b22; } JRs G; for (Index k=f; k<=l-2; k++) { // variables for Householder reflections Vector2s essential2; Scalar tau, beta; Vector3s hr(x,y,z); // Q_k to annihilate S(k+1,k-1) and S(k+2,k-1) hr.makeHouseholderInPlace(tau, beta); essential2 = hr.template bottomRows<2>(); Index fc=(std::max)(k-1,Index(0)); // first col to update m_S.template middleRows<3>(k).rightCols(dim-fc).applyHouseholderOnTheLeft(essential2, tau, m_workspace.data()); m_T.template middleRows<3>(k).rightCols(dim-fc).applyHouseholderOnTheLeft(essential2, tau, m_workspace.data()); if (m_computeQZ) m_Q.template middleCols<3>(k).applyHouseholderOnTheRight(essential2, tau, m_workspace.data()); if (k>f) m_S.coeffRef(k+2,k-1) = m_S.coeffRef(k+1,k-1) = Scalar(0.0); // Z_{k1} to annihilate T(k+2,k+1) and T(k+2,k) hr << m_T.coeff(k+2,k+2),m_T.coeff(k+2,k),m_T.coeff(k+2,k+1); hr.makeHouseholderInPlace(tau, beta); essential2 = hr.template bottomRows<2>(); { Index lr = (std::min)(k+4,dim); // last row to update Map > tmp(m_workspace.data(),lr); // S tmp = m_S.template middleCols<2>(k).topRows(lr) * essential2; tmp += m_S.col(k+2).head(lr); m_S.col(k+2).head(lr) -= tau*tmp; m_S.template middleCols<2>(k).topRows(lr) -= (tau*tmp) * essential2.adjoint(); // T tmp = m_T.template middleCols<2>(k).topRows(lr) * essential2; tmp += m_T.col(k+2).head(lr); m_T.col(k+2).head(lr) -= tau*tmp; m_T.template middleCols<2>(k).topRows(lr) -= (tau*tmp) * essential2.adjoint(); } if (m_computeQZ) { // Z Map > tmp(m_workspace.data(),dim); tmp = essential2.adjoint()*(m_Z.template middleRows<2>(k)); tmp += m_Z.row(k+2); m_Z.row(k+2) -= tau*tmp; m_Z.template middleRows<2>(k) -= essential2 * (tau*tmp); } m_T.coeffRef(k+2,k) = m_T.coeffRef(k+2,k+1) = Scalar(0.0); // Z_{k2} to annihilate T(k+1,k) G.makeGivens(m_T.coeff(k+1,k+1), m_T.coeff(k+1,k)); m_S.applyOnTheRight(k+1,k,G); m_T.applyOnTheRight(k+1,k,G); // update Z if (m_computeQZ) m_Z.applyOnTheLeft(k+1,k,G.adjoint()); m_T.coeffRef(k+1,k) = Scalar(0.0); // update x,y,z x = m_S.coeff(k+1,k); y = m_S.coeff(k+2,k); if (k < l-2) z = m_S.coeff(k+3,k); } // loop over k // Q_{n-1} to annihilate y = S(l,l-2) G.makeGivens(x,y); m_S.applyOnTheLeft(l-1,l,G.adjoint()); m_T.applyOnTheLeft(l-1,l,G.adjoint()); if (m_computeQZ) m_Q.applyOnTheRight(l-1,l,G); m_S.coeffRef(l,l-2) = Scalar(0.0); // Z_{n-1} to annihilate T(l,l-1) G.makeGivens(m_T.coeff(l,l),m_T.coeff(l,l-1)); m_S.applyOnTheRight(l,l-1,G); m_T.applyOnTheRight(l,l-1,G); if (m_computeQZ) m_Z.applyOnTheLeft(l,l-1,G.adjoint()); m_T.coeffRef(l,l-1) = Scalar(0.0); } template RealQZ& RealQZ::compute(const MatrixType& A_in, const MatrixType& B_in, bool computeQZ) { const Index dim = A_in.cols(); eigen_assert (A_in.rows()==dim && A_in.cols()==dim && B_in.rows()==dim && 
B_in.cols()==dim && "Need square matrices of the same dimension"); m_isInitialized = true; m_computeQZ = computeQZ; m_S = A_in; m_T = B_in; m_workspace.resize(dim*2); m_global_iter = 0; // entrance point: hessenberg triangular decomposition hessenbergTriangular(); // compute L1 vector norms of T, S into m_normOfS, m_normOfT computeNorms(); Index l = dim-1, f, local_iter = 0; while (l>0 && local_iter0) m_S.coeffRef(f,f-1) = Scalar(0.0); if (f == l) // One root found { l--; local_iter = 0; } else if (f == l-1) // Two roots found { splitOffTwoRows(f); l -= 2; local_iter = 0; } else // No convergence yet { // if there's zero on diagonal of T, we can isolate an eigenvalue with Givens rotations Index z = findSmallDiagEntry(f,l); if (z>=f) { // zero found pushDownZero(z,f,l); } else { // We are sure now that S.block(f,f, l-f+1,l-f+1) is underuced upper-Hessenberg // and T.block(f,f, l-f+1,l-f+1) is invertible uper-triangular, which allows to // apply a QR-like iteration to rows and columns f..l. step(f,l, local_iter); local_iter++; m_global_iter++; } } } // check if we converged before reaching iterations limit m_info = (local_iter inline \ ComplexSchur >& \ ComplexSchur >::compute(const Matrix& matrix, bool computeU) \ { \ typedef Matrix MatrixType; \ typedef MatrixType::Scalar Scalar; \ typedef MatrixType::RealScalar RealScalar; \ typedef std::complex ComplexScalar; \ \ eigen_assert(matrix.cols() == matrix.rows()); \ \ m_matUisUptodate = false; \ if(matrix.cols() == 1) \ { \ m_matT = matrix.cast(); \ if(computeU) m_matU = ComplexMatrixType::Identity(1,1); \ m_info = Success; \ m_isInitialized = true; \ m_matUisUptodate = computeU; \ return *this; \ } \ lapack_int n = matrix.cols(), sdim, info; \ lapack_int lda = matrix.outerStride(); \ lapack_int matrix_order = MKLCOLROW; \ char jobvs, sort='N'; \ LAPACK_##MKLPREFIX_U##_SELECT1 select = 0; \ jobvs = (computeU) ? 'V' : 'N'; \ m_matU.resize(n, n); \ lapack_int ldvs = m_matU.outerStride(); \ m_matT = matrix; \ Matrix w; \ w.resize(n, 1);\ info = LAPACKE_##MKLPREFIX##gees( matrix_order, jobvs, sort, select, n, (MKLTYPE*)m_matT.data(), lda, &sdim, (MKLTYPE*)w.data(), (MKLTYPE*)m_matU.data(), ldvs ); \ if(info == 0) \ m_info = Success; \ else \ m_info = NoConvergence; \ \ m_isInitialized = true; \ m_matUisUptodate = computeU; \ return *this; \ \ } EIGEN_MKL_SCHUR_COMPLEX(dcomplex, MKL_Complex16, z, Z, ColMajor, LAPACK_COL_MAJOR) EIGEN_MKL_SCHUR_COMPLEX(scomplex, MKL_Complex8, c, C, ColMajor, LAPACK_COL_MAJOR) EIGEN_MKL_SCHUR_COMPLEX(dcomplex, MKL_Complex16, z, Z, RowMajor, LAPACK_ROW_MAJOR) EIGEN_MKL_SCHUR_COMPLEX(scomplex, MKL_Complex8, c, C, RowMajor, LAPACK_ROW_MAJOR) } // end namespace Eigen #endif // EIGEN_COMPLEX_SCHUR_MKL_H RcppEigen/inst/include/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h0000644000176200001440000003322712550462273025375 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Gael Guennebaud // Copyright (C) 2010,2012 Jitse Niesen // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_GENERALIZEDEIGENSOLVER_H #define EIGEN_GENERALIZEDEIGENSOLVER_H #include "./RealQZ.h" namespace Eigen { /** \eigenvalues_module \ingroup Eigenvalues_Module * * * \class GeneralizedEigenSolver * * \brief Computes the generalized eigenvalues and eigenvectors of a pair of general matrices * * \tparam _MatrixType the type of the matrices of which we are computing the * eigen-decomposition; this is expected to be an instantiation of the Matrix * class template. Currently, only real matrices are supported. * * The generalized eigenvalues and eigenvectors of a matrix pair \f$ A \f$ and \f$ B \f$ are scalars * \f$ \lambda \f$ and vectors \f$ v \f$ such that \f$ Av = \lambda Bv \f$. If * \f$ D \f$ is a diagonal matrix with the eigenvalues on the diagonal, and * \f$ V \f$ is a matrix with the eigenvectors as its columns, then \f$ A V = * B V D \f$. The matrix \f$ V \f$ is almost always invertible, in which case we * have \f$ A = B V D V^{-1} \f$. This is called the generalized eigen-decomposition. * * The generalized eigenvalues and eigenvectors of a matrix pair may be complex, even when the * matrices are real. Moreover, the generalized eigenvalue might be infinite if the matrix B is * singular. To workaround this difficulty, the eigenvalues are provided as a pair of complex \f$ \alpha \f$ * and real \f$ \beta \f$ such that: \f$ \lambda_i = \alpha_i / \beta_i \f$. If \f$ \beta_i \f$ is (nearly) zero, * then one can consider the well defined left eigenvalue \f$ \mu = \beta_i / \alpha_i\f$ such that: * \f$ \mu_i A v_i = B v_i \f$, or even \f$ \mu_i u_i^T A = u_i^T B \f$ where \f$ u_i \f$ is * called the left eigenvector. * * Call the function compute() to compute the generalized eigenvalues and eigenvectors of * a given matrix pair. Alternatively, you can use the * GeneralizedEigenSolver(const MatrixType&, const MatrixType&, bool) constructor which computes the * eigenvalues and eigenvectors at construction time. Once the eigenvalue and * eigenvectors are computed, they can be retrieved with the eigenvalues() and * eigenvectors() functions. * * Here is an usage example of this class: * Example: \include GeneralizedEigenSolver.cpp * Output: \verbinclude GeneralizedEigenSolver.out * * \sa MatrixBase::eigenvalues(), class ComplexEigenSolver, class SelfAdjointEigenSolver */ template class GeneralizedEigenSolver { public: /** \brief Synonym for the template parameter \p _MatrixType. */ typedef _MatrixType MatrixType; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, Options = MatrixType::Options, MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime }; /** \brief Scalar type for matrices of type #MatrixType. */ typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef typename MatrixType::Index Index; /** \brief Complex scalar type for #MatrixType. * * This is \c std::complex if #Scalar is real (e.g., * \c float or \c double) and just \c Scalar if #Scalar is * complex. */ typedef std::complex ComplexScalar; /** \brief Type for vector of real scalar values eigenvalues as returned by betas(). * * This is a column vector with entries of type #Scalar. * The length of the vector is the size of #MatrixType. */ typedef Matrix VectorType; /** \brief Type for vector of complex scalar values eigenvalues as returned by betas(). * * This is a column vector with entries of type #ComplexScalar. * The length of the vector is the size of #MatrixType. 
*/ typedef Matrix ComplexVectorType; /** \brief Expression type for the eigenvalues as returned by eigenvalues(). */ typedef CwiseBinaryOp,ComplexVectorType,VectorType> EigenvalueType; /** \brief Type for matrix of eigenvectors as returned by eigenvectors(). * * This is a square matrix with entries of type #ComplexScalar. * The size is the same as the size of #MatrixType. */ typedef Matrix EigenvectorsType; /** \brief Default constructor. * * The default constructor is useful in cases in which the user intends to * perform decompositions via EigenSolver::compute(const MatrixType&, bool). * * \sa compute() for an example. */ GeneralizedEigenSolver() : m_eivec(), m_alphas(), m_betas(), m_isInitialized(false), m_realQZ(), m_matS(), m_tmp() {} /** \brief Default constructor with memory preallocation * * Like the default constructor but with preallocation of the internal data * according to the specified problem \a size. * \sa GeneralizedEigenSolver() */ GeneralizedEigenSolver(Index size) : m_eivec(size, size), m_alphas(size), m_betas(size), m_isInitialized(false), m_eigenvectorsOk(false), m_realQZ(size), m_matS(size, size), m_tmp(size) {} /** \brief Constructor; computes the generalized eigendecomposition of given matrix pair. * * \param[in] A Square matrix whose eigendecomposition is to be computed. * \param[in] B Square matrix whose eigendecomposition is to be computed. * \param[in] computeEigenvectors If true, both the eigenvectors and the * eigenvalues are computed; if false, only the eigenvalues are computed. * * This constructor calls compute() to compute the generalized eigenvalues * and eigenvectors. * * \sa compute() */ GeneralizedEigenSolver(const MatrixType& A, const MatrixType& B, bool computeEigenvectors = true) : m_eivec(A.rows(), A.cols()), m_alphas(A.cols()), m_betas(A.cols()), m_isInitialized(false), m_eigenvectorsOk(false), m_realQZ(A.cols()), m_matS(A.rows(), A.cols()), m_tmp(A.cols()) { compute(A, B, computeEigenvectors); } /* \brief Returns the computed generalized eigenvectors. * * \returns %Matrix whose columns are the (possibly complex) eigenvectors. * * \pre Either the constructor * GeneralizedEigenSolver(const MatrixType&,const MatrixType&, bool) or the member function * compute(const MatrixType&, const MatrixType& bool) has been called before, and * \p computeEigenvectors was set to true (the default). * * Column \f$ k \f$ of the returned matrix is an eigenvector corresponding * to eigenvalue number \f$ k \f$ as returned by eigenvalues(). The * eigenvectors are normalized to have (Euclidean) norm equal to one. The * matrix returned by this function is the matrix \f$ V \f$ in the * generalized eigendecomposition \f$ A = B V D V^{-1} \f$, if it exists. * * \sa eigenvalues() */ // EigenvectorsType eigenvectors() const; /** \brief Returns an expression of the computed generalized eigenvalues. * * \returns An expression of the column vector containing the eigenvalues. * * It is a shortcut for \code this->alphas().cwiseQuotient(this->betas()); \endcode * Not that betas might contain zeros. It is therefore not recommended to use this function, * but rather directly deal with the alphas and betas vectors. * * \pre Either the constructor * GeneralizedEigenSolver(const MatrixType&,const MatrixType&,bool) or the member function * compute(const MatrixType&,const MatrixType&,bool) has been called before. * * The eigenvalues are repeated according to their algebraic multiplicity, * so there are as many eigenvalues as rows in the matrix. 
The eigenvalues * are not sorted in any particular order. * * \sa alphas(), betas(), eigenvectors() */ EigenvalueType eigenvalues() const { eigen_assert(m_isInitialized && "GeneralizedEigenSolver is not initialized."); return EigenvalueType(m_alphas,m_betas); } /** \returns A const reference to the vectors containing the alpha values * * This vector permits to reconstruct the j-th eigenvalues as alphas(i)/betas(j). * * \sa betas(), eigenvalues() */ ComplexVectorType alphas() const { eigen_assert(m_isInitialized && "GeneralizedEigenSolver is not initialized."); return m_alphas; } /** \returns A const reference to the vectors containing the beta values * * This vector permits to reconstruct the j-th eigenvalues as alphas(i)/betas(j). * * \sa alphas(), eigenvalues() */ VectorType betas() const { eigen_assert(m_isInitialized && "GeneralizedEigenSolver is not initialized."); return m_betas; } /** \brief Computes generalized eigendecomposition of given matrix. * * \param[in] A Square matrix whose eigendecomposition is to be computed. * \param[in] B Square matrix whose eigendecomposition is to be computed. * \param[in] computeEigenvectors If true, both the eigenvectors and the * eigenvalues are computed; if false, only the eigenvalues are * computed. * \returns Reference to \c *this * * This function computes the eigenvalues of the real matrix \p matrix. * The eigenvalues() function can be used to retrieve them. If * \p computeEigenvectors is true, then the eigenvectors are also computed * and can be retrieved by calling eigenvectors(). * * The matrix is first reduced to real generalized Schur form using the RealQZ * class. The generalized Schur decomposition is then used to compute the eigenvalues * and eigenvectors. * * The cost of the computation is dominated by the cost of the * generalized Schur decomposition. * * This method reuses of the allocated data in the GeneralizedEigenSolver object. */ GeneralizedEigenSolver& compute(const MatrixType& A, const MatrixType& B, bool computeEigenvectors = true); ComputationInfo info() const { eigen_assert(m_isInitialized && "EigenSolver is not initialized."); return m_realQZ.info(); } /** Sets the maximal number of iterations allowed. 
*/ GeneralizedEigenSolver& setMaxIterations(Index maxIters) { m_realQZ.setMaxIterations(maxIters); return *this; } protected: static void check_template_parameters() { EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); EIGEN_STATIC_ASSERT(!NumTraits::IsComplex, NUMERIC_TYPE_MUST_BE_REAL); } MatrixType m_eivec; ComplexVectorType m_alphas; VectorType m_betas; bool m_isInitialized; bool m_eigenvectorsOk; RealQZ m_realQZ; MatrixType m_matS; typedef Matrix ColumnVectorType; ColumnVectorType m_tmp; }; //template //typename GeneralizedEigenSolver::EigenvectorsType GeneralizedEigenSolver::eigenvectors() const //{ // eigen_assert(m_isInitialized && "EigenSolver is not initialized."); // eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues."); // Index n = m_eivec.cols(); // EigenvectorsType matV(n,n); // // TODO // return matV; //} template GeneralizedEigenSolver& GeneralizedEigenSolver::compute(const MatrixType& A, const MatrixType& B, bool computeEigenvectors) { check_template_parameters(); using std::sqrt; using std::abs; eigen_assert(A.cols() == A.rows() && B.cols() == A.rows() && B.cols() == B.rows()); // Reduce to generalized real Schur form: // A = Q S Z and B = Q T Z m_realQZ.compute(A, B, computeEigenvectors); if (m_realQZ.info() == Success) { m_matS = m_realQZ.matrixS(); if (computeEigenvectors) m_eivec = m_realQZ.matrixZ().transpose(); // Compute eigenvalues from matS m_alphas.resize(A.cols()); m_betas.resize(A.cols()); Index i = 0; while (i < A.cols()) { if (i == A.cols() - 1 || m_matS.coeff(i+1, i) == Scalar(0)) { m_alphas.coeffRef(i) = m_matS.coeff(i, i); m_betas.coeffRef(i) = m_realQZ.matrixT().coeff(i,i); ++i; } else { Scalar p = Scalar(0.5) * (m_matS.coeff(i, i) - m_matS.coeff(i+1, i+1)); Scalar z = sqrt(abs(p * p + m_matS.coeff(i+1, i) * m_matS.coeff(i, i+1))); m_alphas.coeffRef(i) = ComplexScalar(m_matS.coeff(i+1, i+1) + p, z); m_alphas.coeffRef(i+1) = ComplexScalar(m_matS.coeff(i+1, i+1) + p, -z); m_betas.coeffRef(i) = m_realQZ.matrixT().coeff(i,i); m_betas.coeffRef(i+1) = m_realQZ.matrixT().coeff(i,i); i += 2; } } } m_isInitialized = true; m_eigenvectorsOk = false;//computeEigenvectors; return *this; } } // end namespace Eigen #endif // EIGEN_GENERALIZEDEIGENSOLVER_H RcppEigen/inst/include/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h0000644000176200001440000007303312647570720025351 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2010 Gael Guennebaud // Copyright (C) 2010 Jitse Niesen // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SELFADJOINTEIGENSOLVER_H #define EIGEN_SELFADJOINTEIGENSOLVER_H #include "./Tridiagonalization.h" namespace Eigen { template class GeneralizedSelfAdjointEigenSolver; namespace internal { template struct direct_selfadjoint_eigenvalues; } /** \eigenvalues_module \ingroup Eigenvalues_Module * * * \class SelfAdjointEigenSolver * * \brief Computes eigenvalues and eigenvectors of selfadjoint matrices * * \tparam _MatrixType the type of the matrix of which we are computing the * eigendecomposition; this is expected to be an instantiation of the Matrix * class template. * * A matrix \f$ A \f$ is selfadjoint if it equals its adjoint. For real * matrices, this means that the matrix is symmetric: it equals its * transpose. 
This class computes the eigenvalues and eigenvectors of a * selfadjoint matrix. These are the scalars \f$ \lambda \f$ and vectors * \f$ v \f$ such that \f$ Av = \lambda v \f$. The eigenvalues of a * selfadjoint matrix are always real. If \f$ D \f$ is a diagonal matrix with * the eigenvalues on the diagonal, and \f$ V \f$ is a matrix with the * eigenvectors as its columns, then \f$ A = V D V^{-1} \f$ (for selfadjoint * matrices, the matrix \f$ V \f$ is always invertible). This is called the * eigendecomposition. * * The algorithm exploits the fact that the matrix is selfadjoint, making it * faster and more accurate than the general purpose eigenvalue algorithms * implemented in EigenSolver and ComplexEigenSolver. * * Only the \b lower \b triangular \b part of the input matrix is referenced. * * Call the function compute() to compute the eigenvalues and eigenvectors of * a given matrix. Alternatively, you can use the * SelfAdjointEigenSolver(const MatrixType&, int) constructor which computes * the eigenvalues and eigenvectors at construction time. Once the eigenvalue * and eigenvectors are computed, they can be retrieved with the eigenvalues() * and eigenvectors() functions. * * The documentation for SelfAdjointEigenSolver(const MatrixType&, int) * contains an example of the typical use of this class. * * To solve the \em generalized eigenvalue problem \f$ Av = \lambda Bv \f$ and * the likes, see the class GeneralizedSelfAdjointEigenSolver. * * \sa MatrixBase::eigenvalues(), class EigenSolver, class ComplexEigenSolver */ template class SelfAdjointEigenSolver { public: typedef _MatrixType MatrixType; enum { Size = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, Options = MatrixType::Options, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime }; /** \brief Scalar type for matrices of type \p _MatrixType. */ typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Index Index; typedef Matrix EigenvectorsType; /** \brief Real scalar type for \p _MatrixType. * * This is just \c Scalar if #Scalar is real (e.g., \c float or * \c double), and the type of the real part of \c Scalar if #Scalar is * complex. */ typedef typename NumTraits::Real RealScalar; friend struct internal::direct_selfadjoint_eigenvalues::IsComplex>; /** \brief Type for vector of eigenvalues as returned by eigenvalues(). * * This is a column vector with entries of type #RealScalar. * The length of the vector is the size of \p _MatrixType. */ typedef typename internal::plain_col_type::type RealVectorType; typedef Tridiagonalization TridiagonalizationType; /** \brief Default constructor for fixed-size matrices. * * The default constructor is useful in cases in which the user intends to * perform decompositions via compute(). This constructor * can only be used if \p _MatrixType is a fixed-size matrix; use * SelfAdjointEigenSolver(Index) for dynamic-size matrices. * * Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver.cpp * Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver.out */ SelfAdjointEigenSolver() : m_eivec(), m_eivalues(), m_subdiag(), m_isInitialized(false) { } /** \brief Constructor, pre-allocates memory for dynamic-size matrices. * * \param [in] size Positive integer, size of the matrix whose * eigenvalues and eigenvectors will be computed. * * This constructor is useful for dynamic-size matrices, when the user * intends to perform decompositions via compute(). The \p size * parameter is only used as a hint. 
It is not an error to give a wrong * \p size, but it may impair performance. * * \sa compute() for an example */ SelfAdjointEigenSolver(Index size) : m_eivec(size, size), m_eivalues(size), m_subdiag(size > 1 ? size - 1 : 1), m_isInitialized(false) {} /** \brief Constructor; computes eigendecomposition of given matrix. * * \param[in] matrix Selfadjoint matrix whose eigendecomposition is to * be computed. Only the lower triangular part of the matrix is referenced. * \param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly. * * This constructor calls compute(const MatrixType&, int) to compute the * eigenvalues of the matrix \p matrix. The eigenvectors are computed if * \p options equals #ComputeEigenvectors. * * Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType.cpp * Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType.out * * \sa compute(const MatrixType&, int) */ SelfAdjointEigenSolver(const MatrixType& matrix, int options = ComputeEigenvectors) : m_eivec(matrix.rows(), matrix.cols()), m_eivalues(matrix.cols()), m_subdiag(matrix.rows() > 1 ? matrix.rows() - 1 : 1), m_isInitialized(false) { compute(matrix, options); } /** \brief Computes eigendecomposition of given matrix. * * \param[in] matrix Selfadjoint matrix whose eigendecomposition is to * be computed. Only the lower triangular part of the matrix is referenced. * \param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly. * \returns Reference to \c *this * * This function computes the eigenvalues of \p matrix. The eigenvalues() * function can be used to retrieve them. If \p options equals #ComputeEigenvectors, * then the eigenvectors are also computed and can be retrieved by * calling eigenvectors(). * * This implementation uses a symmetric QR algorithm. The matrix is first * reduced to tridiagonal form using the Tridiagonalization class. The * tridiagonal matrix is then brought to diagonal form with implicit * symmetric QR steps with Wilkinson shift. Details can be found in * Section 8.3 of Golub \& Van Loan, %Matrix Computations. * * The cost of the computation is about \f$ 9n^3 \f$ if the eigenvectors * are required and \f$ 4n^3/3 \f$ if they are not required. * * This method reuses the memory in the SelfAdjointEigenSolver object that * was allocated when the object was constructed, if the size of the * matrix does not change. * * Example: \include SelfAdjointEigenSolver_compute_MatrixType.cpp * Output: \verbinclude SelfAdjointEigenSolver_compute_MatrixType.out * * \sa SelfAdjointEigenSolver(const MatrixType&, int) */ SelfAdjointEigenSolver& compute(const MatrixType& matrix, int options = ComputeEigenvectors); /** \brief Computes eigendecomposition of given matrix using a direct algorithm * * This is a variant of compute(const MatrixType&, int options) which * directly solves the underlying polynomial equation. * * Currently only 3x3 matrices for which the sizes are known at compile time are supported (e.g., Matrix3d). * * This method is usually significantly faster than the QR algorithm * but it might also be less accurate. It is also worth noting that * for 3x3 matrices it involves trigonometric operations which are * not necessarily available for all scalar types. * * \sa compute(const MatrixType&, int options) */ SelfAdjointEigenSolver& computeDirect(const MatrixType& matrix, int options = ComputeEigenvectors); /** \brief Returns the eigenvectors of given matrix. * * \returns A const reference to the matrix whose columns are the eigenvectors. 
* * \pre The eigenvectors have been computed before. * * Column \f$ k \f$ of the returned matrix is an eigenvector corresponding * to eigenvalue number \f$ k \f$ as returned by eigenvalues(). The * eigenvectors are normalized to have (Euclidean) norm equal to one. If * this object was used to solve the eigenproblem for the selfadjoint * matrix \f$ A \f$, then the matrix returned by this function is the * matrix \f$ V \f$ in the eigendecomposition \f$ A = V D V^{-1} \f$. * * Example: \include SelfAdjointEigenSolver_eigenvectors.cpp * Output: \verbinclude SelfAdjointEigenSolver_eigenvectors.out * * \sa eigenvalues() */ const EigenvectorsType& eigenvectors() const { eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized."); eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues."); return m_eivec; } /** \brief Returns the eigenvalues of given matrix. * * \returns A const reference to the column vector containing the eigenvalues. * * \pre The eigenvalues have been computed before. * * The eigenvalues are repeated according to their algebraic multiplicity, * so there are as many eigenvalues as rows in the matrix. The eigenvalues * are sorted in increasing order. * * Example: \include SelfAdjointEigenSolver_eigenvalues.cpp * Output: \verbinclude SelfAdjointEigenSolver_eigenvalues.out * * \sa eigenvectors(), MatrixBase::eigenvalues() */ const RealVectorType& eigenvalues() const { eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized."); return m_eivalues; } /** \brief Computes the positive-definite square root of the matrix. * * \returns the positive-definite square root of the matrix * * \pre The eigenvalues and eigenvectors of a positive-definite matrix * have been computed before. * * The square root of a positive-definite matrix \f$ A \f$ is the * positive-definite matrix whose square equals \f$ A \f$. This function * uses the eigendecomposition \f$ A = V D V^{-1} \f$ to compute the * square root as \f$ A^{1/2} = V D^{1/2} V^{-1} \f$. * * Example: \include SelfAdjointEigenSolver_operatorSqrt.cpp * Output: \verbinclude SelfAdjointEigenSolver_operatorSqrt.out * * \sa operatorInverseSqrt(), * \ref MatrixFunctions_Module "MatrixFunctions Module" */ MatrixType operatorSqrt() const { eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized."); eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues."); return m_eivec * m_eivalues.cwiseSqrt().asDiagonal() * m_eivec.adjoint(); } /** \brief Computes the inverse square root of the matrix. * * \returns the inverse positive-definite square root of the matrix * * \pre The eigenvalues and eigenvectors of a positive-definite matrix * have been computed before. * * This function uses the eigendecomposition \f$ A = V D V^{-1} \f$ to * compute the inverse square root as \f$ V D^{-1/2} V^{-1} \f$. This is * cheaper than first computing the square root with operatorSqrt() and * then its inverse with MatrixBase::inverse(). 
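 *
 * A minimal sketch (illustrative only; \c A is assumed to be a positive-definite \c MatrixXd):
 * \code
 * SelfAdjointEigenSolver<MatrixXd> es(A);
 * MatrixXd isqrtA = es.operatorInverseSqrt();   // A^{-1/2}
 * // isqrtA * A * isqrtA reproduces the identity up to rounding error
 * \endcode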
* * Example: \include SelfAdjointEigenSolver_operatorInverseSqrt.cpp * Output: \verbinclude SelfAdjointEigenSolver_operatorInverseSqrt.out * * \sa operatorSqrt(), MatrixBase::inverse(), * \ref MatrixFunctions_Module "MatrixFunctions Module" */ MatrixType operatorInverseSqrt() const { eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized."); eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues."); return m_eivec * m_eivalues.cwiseInverse().cwiseSqrt().asDiagonal() * m_eivec.adjoint(); } /** \brief Reports whether previous computation was successful. * * \returns \c Success if computation was succesful, \c NoConvergence otherwise. */ ComputationInfo info() const { eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized."); return m_info; } /** \brief Maximum number of iterations. * * The algorithm terminates if it does not converge within m_maxIterations * n iterations, where n * denotes the size of the matrix. This value is currently set to 30 (copied from LAPACK). */ static const int m_maxIterations = 30; #ifdef EIGEN2_SUPPORT SelfAdjointEigenSolver(const MatrixType& matrix, bool computeEigenvectors) : m_eivec(matrix.rows(), matrix.cols()), m_eivalues(matrix.cols()), m_subdiag(matrix.rows() > 1 ? matrix.rows() - 1 : 1), m_isInitialized(false) { compute(matrix, computeEigenvectors); } SelfAdjointEigenSolver(const MatrixType& matA, const MatrixType& matB, bool computeEigenvectors = true) : m_eivec(matA.cols(), matA.cols()), m_eivalues(matA.cols()), m_subdiag(matA.cols() > 1 ? matA.cols() - 1 : 1), m_isInitialized(false) { static_cast*>(this)->compute(matA, matB, computeEigenvectors ? ComputeEigenvectors : EigenvaluesOnly); } void compute(const MatrixType& matrix, bool computeEigenvectors) { compute(matrix, computeEigenvectors ? ComputeEigenvectors : EigenvaluesOnly); } void compute(const MatrixType& matA, const MatrixType& matB, bool computeEigenvectors = true) { compute(matA, matB, computeEigenvectors ? ComputeEigenvectors : EigenvaluesOnly); } #endif // EIGEN2_SUPPORT protected: static void check_template_parameters() { EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); } EigenvectorsType m_eivec; RealVectorType m_eivalues; typename TridiagonalizationType::SubDiagonalType m_subdiag; ComputationInfo m_info; bool m_isInitialized; bool m_eigenvectorsOk; }; /** \internal * * \eigenvalues_module \ingroup Eigenvalues_Module * * Performs a QR step on a tridiagonal symmetric matrix represented as a * pair of two vectors \a diag and \a subdiag. * * \param matA the input selfadjoint matrix * \param hCoeffs returned Householder coefficients * * For compilation efficiency reasons, this procedure does not use eigen expression * for its arguments. 
* * Implemented from Golub's "Matrix Computations", algorithm 8.3.2: * "implicit symmetric QR step with Wilkinson shift" */ namespace internal { template static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n); } template SelfAdjointEigenSolver& SelfAdjointEigenSolver ::compute(const MatrixType& matrix, int options) { check_template_parameters(); using std::abs; eigen_assert(matrix.cols() == matrix.rows()); eigen_assert((options&~(EigVecMask|GenEigMask))==0 && (options&EigVecMask)!=EigVecMask && "invalid option parameter"); bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors; Index n = matrix.cols(); m_eivalues.resize(n,1); if(n==1) { m_eivalues.coeffRef(0,0) = numext::real(matrix.coeff(0,0)); if(computeEigenvectors) m_eivec.setOnes(n,n); m_info = Success; m_isInitialized = true; m_eigenvectorsOk = computeEigenvectors; return *this; } // declare some aliases RealVectorType& diag = m_eivalues; EigenvectorsType& mat = m_eivec; // map the matrix coefficients to [-1:1] to avoid over- and underflow. mat = matrix.template triangularView(); RealScalar scale = mat.cwiseAbs().maxCoeff(); if(scale==RealScalar(0)) scale = RealScalar(1); mat.template triangularView() /= scale; m_subdiag.resize(n-1); internal::tridiagonalization_inplace(mat, diag, m_subdiag, computeEigenvectors); Index end = n-1; Index start = 0; Index iter = 0; // total number of iterations while (end>0) { for (Index i = start; i0 && m_subdiag[end-1]==0) { end--; } if (end<=0) break; // if we spent too many iterations, we give up iter++; if(iter > m_maxIterations * n) break; start = end - 1; while (start>0 && m_subdiag[start-1]!=0) start--; internal::tridiagonal_qr_step(diag.data(), m_subdiag.data(), start, end, computeEigenvectors ? m_eivec.data() : (Scalar*)0, n); } if (iter <= m_maxIterations * n) m_info = Success; else m_info = NoConvergence; // Sort eigenvalues and corresponding vectors. // TODO make the sort optional ? // TODO use a better sort algorithm !! if (m_info == Success) { for (Index i = 0; i < n-1; ++i) { Index k; m_eivalues.segment(i,n-i).minCoeff(&k); if (k > 0) { std::swap(m_eivalues[i], m_eivalues[k+i]); if(computeEigenvectors) m_eivec.col(i).swap(m_eivec.col(k+i)); } } } // scale back the eigen values m_eivalues *= scale; m_isInitialized = true; m_eigenvectorsOk = computeEigenvectors; return *this; } namespace internal { template struct direct_selfadjoint_eigenvalues { static inline void run(SolverType& eig, const typename SolverType::MatrixType& A, int options) { eig.compute(A,options); } }; template struct direct_selfadjoint_eigenvalues { typedef typename SolverType::MatrixType MatrixType; typedef typename SolverType::RealVectorType VectorType; typedef typename SolverType::Scalar Scalar; typedef typename MatrixType::Index Index; typedef typename SolverType::EigenvectorsType EigenvectorsType; /** \internal * Computes the roots of the characteristic polynomial of \a m. * For numerical stability m.trace() should be near zero and to avoid over- or underflow m should be normalized. */ static inline void computeRoots(const MatrixType& m, VectorType& roots) { using std::sqrt; using std::atan2; using std::cos; using std::sin; const Scalar s_inv3 = Scalar(1.0)/Scalar(3.0); const Scalar s_sqrt3 = sqrt(Scalar(3.0)); // The characteristic equation is x^3 - c2*x^2 + c1*x - c0 = 0. The // eigenvalues are the roots to this equation, all guaranteed to be // real-valued, because the matrix is symmetric. 
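      // Below, the roots are obtained in closed form: shifting by c2/3 reduces the cubic to a
      // depressed cubic, whose three real roots are recovered with the trigonometric (Viete)
      // formula 2*rho*cos(theta + 2*k*pi/3) + c2/3 for k = 0, 1, 2.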
Scalar c0 = m(0,0)*m(1,1)*m(2,2) + Scalar(2)*m(1,0)*m(2,0)*m(2,1) - m(0,0)*m(2,1)*m(2,1) - m(1,1)*m(2,0)*m(2,0) - m(2,2)*m(1,0)*m(1,0); Scalar c1 = m(0,0)*m(1,1) - m(1,0)*m(1,0) + m(0,0)*m(2,2) - m(2,0)*m(2,0) + m(1,1)*m(2,2) - m(2,1)*m(2,1); Scalar c2 = m(0,0) + m(1,1) + m(2,2); // Construct the parameters used in classifying the roots of the equation // and in solving the equation for the roots in closed form. Scalar c2_over_3 = c2*s_inv3; Scalar a_over_3 = (c2*c2_over_3 - c1)*s_inv3; if(a_over_3 0, atan2 is in [0, pi] and theta is in [0, pi/3] Scalar cos_theta = cos(theta); Scalar sin_theta = sin(theta); // roots are already sorted, since cos is monotonically decreasing on [0, pi] roots(0) = c2_over_3 - rho*(cos_theta + s_sqrt3*sin_theta); // == 2*rho*cos(theta+2pi/3) roots(1) = c2_over_3 - rho*(cos_theta - s_sqrt3*sin_theta); // == 2*rho*cos(theta+ pi/3) roots(2) = c2_over_3 + Scalar(2)*rho*cos_theta; } static inline bool extract_kernel(MatrixType& mat, Ref res, Ref representative) { using std::abs; Index i0; // Find non-zero column i0 (by construction, there must exist a non zero coefficient on the diagonal): mat.diagonal().cwiseAbs().maxCoeff(&i0); // mat.col(i0) is a good candidate for an orthogonal vector to the current eigenvector, // so let's save it: representative = mat.col(i0); Scalar n0, n1; VectorType c0, c1; n0 = (c0 = representative.cross(mat.col((i0+1)%3))).squaredNorm(); n1 = (c1 = representative.cross(mat.col((i0+2)%3))).squaredNorm(); if(n0>n1) res = c0/std::sqrt(n0); else res = c1/std::sqrt(n1); return true; } static inline void run(SolverType& solver, const MatrixType& mat, int options) { eigen_assert(mat.cols() == 3 && mat.cols() == mat.rows()); eigen_assert((options&~(EigVecMask|GenEigMask))==0 && (options&EigVecMask)!=EigVecMask && "invalid option parameter"); bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors; EigenvectorsType& eivecs = solver.m_eivec; VectorType& eivals = solver.m_eivalues; // Shift the matrix to the mean eigenvalue and map the matrix coefficients to [-1:1] to avoid over- and underflow. Scalar shift = mat.trace() / Scalar(3); // TODO Avoid this copy. Currently it is necessary to suppress bogus values when determining maxCoeff and for computing the eigenvectors later MatrixType scaledMat = mat.template selfadjointView(); scaledMat.diagonal().array() -= shift; Scalar scale = scaledMat.cwiseAbs().maxCoeff(); if(scale > 0) scaledMat /= scale; // TODO for scale==0 we could save the remaining operations // compute the eigenvalues computeRoots(scaledMat,eivals); // compute the eigenvectors if(computeEigenvectors) { if((eivals(2)-eivals(0))<=Eigen::NumTraits::epsilon()) { // All three eigenvalues are numerically the same eivecs.setIdentity(); } else { MatrixType tmp; tmp = scaledMat; // Compute the eigenvector of the most distinct eigenvalue Scalar d0 = eivals(2) - eivals(1); Scalar d1 = eivals(1) - eivals(0); Index k(0), l(2); if(d0 > d1) { std::swap(k,l); d0 = d1; } // Compute the eigenvector of index k { tmp.diagonal().array () -= eivals(k); // By construction, 'tmp' is of rank 2, and its kernel corresponds to the respective eigenvector. extract_kernel(tmp, eivecs.col(k), eivecs.col(l)); } // Compute eigenvector of index l if(d0<=2*Eigen::NumTraits::epsilon()*d1) { // If d0 is too small, then the two other eigenvalues are numerically the same, // and thus we only have to ortho-normalize the near orthogonal vector we saved above. 
eivecs.col(l) -= eivecs.col(k).dot(eivecs.col(l))*eivecs.col(l); eivecs.col(l).normalize(); } else { tmp = scaledMat; tmp.diagonal().array () -= eivals(l); VectorType dummy; extract_kernel(tmp, eivecs.col(l), dummy); } // Compute last eigenvector from the other two eivecs.col(1) = eivecs.col(2).cross(eivecs.col(0)).normalized(); } } // Rescale back to the original size. eivals *= scale; eivals.array() += shift; solver.m_info = Success; solver.m_isInitialized = true; solver.m_eigenvectorsOk = computeEigenvectors; } }; // 2x2 direct eigenvalues decomposition, code from Hauke Heibel template struct direct_selfadjoint_eigenvalues { typedef typename SolverType::MatrixType MatrixType; typedef typename SolverType::RealVectorType VectorType; typedef typename SolverType::Scalar Scalar; typedef typename SolverType::EigenvectorsType EigenvectorsType; static inline void computeRoots(const MatrixType& m, VectorType& roots) { using std::sqrt; const Scalar t0 = Scalar(0.5) * sqrt( numext::abs2(m(0,0)-m(1,1)) + Scalar(4)*numext::abs2(m(1,0))); const Scalar t1 = Scalar(0.5) * (m(0,0) + m(1,1)); roots(0) = t1 - t0; roots(1) = t1 + t0; } static inline void run(SolverType& solver, const MatrixType& mat, int options) { using std::sqrt; using std::abs; eigen_assert(mat.cols() == 2 && mat.cols() == mat.rows()); eigen_assert((options&~(EigVecMask|GenEigMask))==0 && (options&EigVecMask)!=EigVecMask && "invalid option parameter"); bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors; EigenvectorsType& eivecs = solver.m_eivec; VectorType& eivals = solver.m_eivalues; // map the matrix coefficients to [-1:1] to avoid over- and underflow. Scalar scale = mat.cwiseAbs().maxCoeff(); scale = (std::max)(scale,Scalar(1)); MatrixType scaledMat = mat / scale; // Compute the eigenvalues computeRoots(scaledMat,eivals); // compute the eigen vectors if(computeEigenvectors) { if((eivals(1)-eivals(0))<=abs(eivals(1))*Eigen::NumTraits::epsilon()) { eivecs.setIdentity(); } else { scaledMat.diagonal().array () -= eivals(1); Scalar a2 = numext::abs2(scaledMat(0,0)); Scalar c2 = numext::abs2(scaledMat(1,1)); Scalar b2 = numext::abs2(scaledMat(1,0)); if(a2>c2) { eivecs.col(1) << -scaledMat(1,0), scaledMat(0,0); eivecs.col(1) /= sqrt(a2+b2); } else { eivecs.col(1) << -scaledMat(1,1), scaledMat(1,0); eivecs.col(1) /= sqrt(c2+b2); } eivecs.col(0) << eivecs.col(1).unitOrthogonal(); } } // Rescale back to the original size. eivals *= scale; solver.m_info = Success; solver.m_isInitialized = true; solver.m_eigenvectorsOk = computeEigenvectors; } }; } template SelfAdjointEigenSolver& SelfAdjointEigenSolver ::computeDirect(const MatrixType& matrix, int options) { internal::direct_selfadjoint_eigenvalues::IsComplex>::run(*this,matrix,options); return *this; } namespace internal { template static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n) { using std::abs; RealScalar td = (diag[end-1] - diag[end])*RealScalar(0.5); RealScalar e = subdiag[end-1]; // Note that thanks to scaling, e^2 or td^2 cannot overflow, however they can still // underflow thus leading to inf/NaN values when using the following commented code: // RealScalar e2 = numext::abs2(subdiag[end-1]); // RealScalar mu = diag[end] - e2 / (td + (td>0 ? 
1 : -1) * sqrt(td*td + e2)); // This explain the following, somewhat more complicated, version: RealScalar mu = diag[end]; if(td==0) mu -= abs(e); else { RealScalar e2 = numext::abs2(subdiag[end-1]); RealScalar h = numext::hypot(td,e); if(e2==0) mu -= (e / (td + (td>0 ? 1 : -1))) * (e / h); else mu -= e2 / (td + (td>0 ? h : -h)); } RealScalar x = diag[start] - mu; RealScalar z = subdiag[start]; for (Index k = start; k < end; ++k) { JacobiRotation rot; rot.makeGivens(x, z); // do T = G' T G RealScalar sdk = rot.s() * diag[k] + rot.c() * subdiag[k]; RealScalar dkp1 = rot.s() * subdiag[k] + rot.c() * diag[k+1]; diag[k] = rot.c() * (rot.c() * diag[k] - rot.s() * subdiag[k]) - rot.s() * (rot.c() * subdiag[k] - rot.s() * diag[k+1]); diag[k+1] = rot.s() * sdk + rot.c() * dkp1; subdiag[k] = rot.c() * sdk - rot.s() * dkp1; if (k > start) subdiag[k - 1] = rot.c() * subdiag[k-1] - rot.s() * z; x = subdiag[k]; if (k < end - 1) { z = -rot.s() * subdiag[k+1]; subdiag[k + 1] = rot.c() * subdiag[k+1]; } // apply the givens rotation to the unit matrix Q = Q * G if (matrixQ) { Map > q(matrixQ,n,n); q.applyOnTheRight(k,k+1,rot); } } } } // end namespace internal } // end namespace Eigen #endif // EIGEN_SELFADJOINTEIGENSOLVER_H RcppEigen/inst/include/Eigen/src/Eigenvalues/Tridiagonalization.h0000644000176200001440000005362112253717461024636 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // Copyright (C) 2010 Jitse Niesen // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_TRIDIAGONALIZATION_H #define EIGEN_TRIDIAGONALIZATION_H namespace Eigen { namespace internal { template struct TridiagonalizationMatrixTReturnType; template struct traits > { typedef typename MatrixType::PlainObject ReturnType; }; template void tridiagonalization_inplace(MatrixType& matA, CoeffVectorType& hCoeffs); } /** \eigenvalues_module \ingroup Eigenvalues_Module * * * \class Tridiagonalization * * \brief Tridiagonal decomposition of a selfadjoint matrix * * \tparam _MatrixType the type of the matrix of which we are computing the * tridiagonal decomposition; this is expected to be an instantiation of the * Matrix class template. * * This class performs a tridiagonal decomposition of a selfadjoint matrix \f$ A \f$ such that: * \f$ A = Q T Q^* \f$ where \f$ Q \f$ is unitary and \f$ T \f$ a real symmetric tridiagonal matrix. * * A tridiagonal matrix is a matrix which has nonzero elements only on the * main diagonal and the first diagonal below and above it. The Hessenberg * decomposition of a selfadjoint matrix is in fact a tridiagonal * decomposition. This class is used in SelfAdjointEigenSolver to compute the * eigenvalues and eigenvectors of a selfadjoint matrix. * * Call the function compute() to compute the tridiagonal decomposition of a * given matrix. Alternatively, you can use the Tridiagonalization(const MatrixType&) * constructor which computes the tridiagonal Schur decomposition at * construction time. Once the decomposition is computed, you can use the * matrixQ() and matrixT() functions to retrieve the matrices Q and T in the * decomposition. * * The documentation of Tridiagonalization(const MatrixType&) contains an * example of the typical use of this class. 
* * \sa class HessenbergDecomposition, class SelfAdjointEigenSolver */ template class Tridiagonalization { public: /** \brief Synonym for the template parameter \p _MatrixType. */ typedef _MatrixType MatrixType; typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef typename MatrixType::Index Index; enum { Size = MatrixType::RowsAtCompileTime, SizeMinusOne = Size == Dynamic ? Dynamic : (Size > 1 ? Size - 1 : 1), Options = MatrixType::Options, MaxSize = MatrixType::MaxRowsAtCompileTime, MaxSizeMinusOne = MaxSize == Dynamic ? Dynamic : (MaxSize > 1 ? MaxSize - 1 : 1) }; typedef Matrix CoeffVectorType; typedef typename internal::plain_col_type::type DiagonalType; typedef Matrix SubDiagonalType; typedef typename internal::remove_all::type MatrixTypeRealView; typedef internal::TridiagonalizationMatrixTReturnType MatrixTReturnType; typedef typename internal::conditional::IsComplex, typename internal::add_const_on_value_type::RealReturnType>::type, const Diagonal >::type DiagonalReturnType; typedef typename internal::conditional::IsComplex, typename internal::add_const_on_value_type >::RealReturnType>::type, const Diagonal< Block > >::type SubDiagonalReturnType; /** \brief Return type of matrixQ() */ typedef HouseholderSequence::type> HouseholderSequenceType; /** \brief Default constructor. * * \param [in] size Positive integer, size of the matrix whose tridiagonal * decomposition will be computed. * * The default constructor is useful in cases in which the user intends to * perform decompositions via compute(). The \p size parameter is only * used as a hint. It is not an error to give a wrong \p size, but it may * impair performance. * * \sa compute() for an example. */ Tridiagonalization(Index size = Size==Dynamic ? 2 : Size) : m_matrix(size,size), m_hCoeffs(size > 1 ? size-1 : 1), m_isInitialized(false) {} /** \brief Constructor; computes tridiagonal decomposition of given matrix. * * \param[in] matrix Selfadjoint matrix whose tridiagonal decomposition * is to be computed. * * This constructor calls compute() to compute the tridiagonal decomposition. * * Example: \include Tridiagonalization_Tridiagonalization_MatrixType.cpp * Output: \verbinclude Tridiagonalization_Tridiagonalization_MatrixType.out */ Tridiagonalization(const MatrixType& matrix) : m_matrix(matrix), m_hCoeffs(matrix.cols() > 1 ? matrix.cols()-1 : 1), m_isInitialized(false) { internal::tridiagonalization_inplace(m_matrix, m_hCoeffs); m_isInitialized = true; } /** \brief Computes tridiagonal decomposition of given matrix. * * \param[in] matrix Selfadjoint matrix whose tridiagonal decomposition * is to be computed. * \returns Reference to \c *this * * The tridiagonal decomposition is computed by bringing the columns of * the matrix successively in the required form using Householder * reflections. The cost is \f$ 4n^3/3 \f$ flops, where \f$ n \f$ denotes * the size of the given matrix. * * This method reuses of the allocated data in the Tridiagonalization * object, if the size of the matrix does not change. * * Example: \include Tridiagonalization_compute.cpp * Output: \verbinclude Tridiagonalization_compute.out */ Tridiagonalization& compute(const MatrixType& matrix) { m_matrix = matrix; m_hCoeffs.resize(matrix.rows()-1, 1); internal::tridiagonalization_inplace(m_matrix, m_hCoeffs); m_isInitialized = true; return *this; } /** \brief Returns the Householder coefficients. 
* * \returns a const reference to the vector of Householder coefficients * * \pre Either the constructor Tridiagonalization(const MatrixType&) or * the member function compute(const MatrixType&) has been called before * to compute the tridiagonal decomposition of a matrix. * * The Householder coefficients allow the reconstruction of the matrix * \f$ Q \f$ in the tridiagonal decomposition from the packed data. * * Example: \include Tridiagonalization_householderCoefficients.cpp * Output: \verbinclude Tridiagonalization_householderCoefficients.out * * \sa packedMatrix(), \ref Householder_Module "Householder module" */ inline CoeffVectorType householderCoefficients() const { eigen_assert(m_isInitialized && "Tridiagonalization is not initialized."); return m_hCoeffs; } /** \brief Returns the internal representation of the decomposition * * \returns a const reference to a matrix with the internal representation * of the decomposition. * * \pre Either the constructor Tridiagonalization(const MatrixType&) or * the member function compute(const MatrixType&) has been called before * to compute the tridiagonal decomposition of a matrix. * * The returned matrix contains the following information: * - the strict upper triangular part is equal to the input matrix A. * - the diagonal and lower sub-diagonal represent the real tridiagonal * symmetric matrix T. * - the rest of the lower part contains the Householder vectors that, * combined with Householder coefficients returned by * householderCoefficients(), allows to reconstruct the matrix Q as * \f$ Q = H_{N-1} \ldots H_1 H_0 \f$. * Here, the matrices \f$ H_i \f$ are the Householder transformations * \f$ H_i = (I - h_i v_i v_i^T) \f$ * where \f$ h_i \f$ is the \f$ i \f$th Householder coefficient and * \f$ v_i \f$ is the Householder vector defined by * \f$ v_i = [ 0, \ldots, 0, 1, M(i+2,i), \ldots, M(N-1,i) ]^T \f$ * with M the matrix returned by this function. * * See LAPACK for further details on this packed storage. * * Example: \include Tridiagonalization_packedMatrix.cpp * Output: \verbinclude Tridiagonalization_packedMatrix.out * * \sa householderCoefficients() */ inline const MatrixType& packedMatrix() const { eigen_assert(m_isInitialized && "Tridiagonalization is not initialized."); return m_matrix; } /** \brief Returns the unitary matrix Q in the decomposition * * \returns object representing the matrix Q * * \pre Either the constructor Tridiagonalization(const MatrixType&) or * the member function compute(const MatrixType&) has been called before * to compute the tridiagonal decomposition of a matrix. * * This function returns a light-weight object of template class * HouseholderSequence. You can either apply it directly to a matrix or * you can convert it to a matrix of type #MatrixType. * * \sa Tridiagonalization(const MatrixType&) for an example, * matrixT(), class HouseholderSequence */ HouseholderSequenceType matrixQ() const { eigen_assert(m_isInitialized && "Tridiagonalization is not initialized."); return HouseholderSequenceType(m_matrix, m_hCoeffs.conjugate()) .setLength(m_matrix.rows() - 1) .setShift(1); } /** \brief Returns an expression of the tridiagonal matrix T in the decomposition * * \returns expression object representing the matrix T * * \pre Either the constructor Tridiagonalization(const MatrixType&) or * the member function compute(const MatrixType&) has been called before * to compute the tridiagonal decomposition of a matrix. 
* * Currently, this function can be used to extract the matrix T from internal * data and copy it to a dense matrix object. In most cases, it may be * sufficient to directly use the packed matrix or the vector expressions * returned by diagonal() and subDiagonal() instead of creating a new * dense copy matrix with this function. * * \sa Tridiagonalization(const MatrixType&) for an example, * matrixQ(), packedMatrix(), diagonal(), subDiagonal() */ MatrixTReturnType matrixT() const { eigen_assert(m_isInitialized && "Tridiagonalization is not initialized."); return MatrixTReturnType(m_matrix.real()); } /** \brief Returns the diagonal of the tridiagonal matrix T in the decomposition. * * \returns expression representing the diagonal of T * * \pre Either the constructor Tridiagonalization(const MatrixType&) or * the member function compute(const MatrixType&) has been called before * to compute the tridiagonal decomposition of a matrix. * * Example: \include Tridiagonalization_diagonal.cpp * Output: \verbinclude Tridiagonalization_diagonal.out * * \sa matrixT(), subDiagonal() */ DiagonalReturnType diagonal() const; /** \brief Returns the subdiagonal of the tridiagonal matrix T in the decomposition. * * \returns expression representing the subdiagonal of T * * \pre Either the constructor Tridiagonalization(const MatrixType&) or * the member function compute(const MatrixType&) has been called before * to compute the tridiagonal decomposition of a matrix. * * \sa diagonal() for an example, matrixT() */ SubDiagonalReturnType subDiagonal() const; protected: MatrixType m_matrix; CoeffVectorType m_hCoeffs; bool m_isInitialized; }; template typename Tridiagonalization::DiagonalReturnType Tridiagonalization::diagonal() const { eigen_assert(m_isInitialized && "Tridiagonalization is not initialized."); return m_matrix.diagonal(); } template typename Tridiagonalization::SubDiagonalReturnType Tridiagonalization::subDiagonal() const { eigen_assert(m_isInitialized && "Tridiagonalization is not initialized."); Index n = m_matrix.rows(); return Block(m_matrix, 1, 0, n-1,n-1).diagonal(); } namespace internal { /** \internal * Performs a tridiagonal decomposition of the selfadjoint matrix \a matA in-place. * * \param[in,out] matA On input the selfadjoint matrix. Only the \b lower triangular part is referenced. * On output, the strict upper part is left unchanged, and the lower triangular part * represents the T and Q matrices in packed format has detailed below. * \param[out] hCoeffs returned Householder coefficients (see below) * * On output, the tridiagonal selfadjoint matrix T is stored in the diagonal * and lower sub-diagonal of the matrix \a matA. * The unitary matrix Q is represented in a compact way as a product of * Householder reflectors \f$ H_i \f$ such that: * \f$ Q = H_{N-1} \ldots H_1 H_0 \f$. * The Householder reflectors are defined as * \f$ H_i = (I - h_i v_i v_i^T) \f$ * where \f$ h_i = hCoeffs[i]\f$ is the \f$ i \f$th Householder coefficient and * \f$ v_i \f$ is the Householder vector defined by * \f$ v_i = [ 0, \ldots, 0, 1, matA(i+2,i), \ldots, matA(N-1,i) ]^T \f$. * * Implemented from Golub's "Matrix Computations", algorithm 8.3.1. 
* * \sa Tridiagonalization::packedMatrix() */ template void tridiagonalization_inplace(MatrixType& matA, CoeffVectorType& hCoeffs) { using numext::conj; typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; Index n = matA.rows(); eigen_assert(n==matA.cols()); eigen_assert(n==hCoeffs.size()+1 || n==1); for (Index i = 0; i() * (conj(h) * matA.col(i).tail(remainingSize))); hCoeffs.tail(n-i-1) += (conj(h)*Scalar(-0.5)*(hCoeffs.tail(remainingSize).dot(matA.col(i).tail(remainingSize)))) * matA.col(i).tail(n-i-1); matA.bottomRightCorner(remainingSize, remainingSize).template selfadjointView() .rankUpdate(matA.col(i).tail(remainingSize), hCoeffs.tail(remainingSize), -1); matA.col(i).coeffRef(i+1) = beta; hCoeffs.coeffRef(i) = h; } } // forward declaration, implementation at the end of this file template::IsComplex> struct tridiagonalization_inplace_selector; /** \brief Performs a full tridiagonalization in place * * \param[in,out] mat On input, the selfadjoint matrix whose tridiagonal * decomposition is to be computed. Only the lower triangular part referenced. * The rest is left unchanged. On output, the orthogonal matrix Q * in the decomposition if \p extractQ is true. * \param[out] diag The diagonal of the tridiagonal matrix T in the * decomposition. * \param[out] subdiag The subdiagonal of the tridiagonal matrix T in * the decomposition. * \param[in] extractQ If true, the orthogonal matrix Q in the * decomposition is computed and stored in \p mat. * * Computes the tridiagonal decomposition of the selfadjoint matrix \p mat in place * such that \f$ mat = Q T Q^* \f$ where \f$ Q \f$ is unitary and \f$ T \f$ a real * symmetric tridiagonal matrix. * * The tridiagonal matrix T is passed to the output parameters \p diag and \p subdiag. If * \p extractQ is true, then the orthogonal matrix Q is passed to \p mat. Otherwise the lower * part of the matrix \p mat is destroyed. * * The vectors \p diag and \p subdiag are not resized. The function * assumes that they are already of the correct size. The length of the * vector \p diag should equal the number of rows in \p mat, and the * length of the vector \p subdiag should be one left. * * This implementation contains an optimized path for 3-by-3 matrices * which is especially useful for plane fitting. * * \note Currently, it requires two temporary vectors to hold the intermediate * Householder coefficients, and to reconstruct the matrix Q from the Householder * reflectors. 
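 *
 * A minimal calling sketch (illustrative only; \c A is assumed to be a selfadjoint
 * \c MatrixXd of size \c n):
 * \code
 * MatrixXd mat = A;                   // overwritten with Q when extractQ is true
 * VectorXd diag(n), subdiag(n - 1);   // must already have the correct sizes
 * internal::tridiagonalization_inplace(mat, diag, subdiag, true);
 * \endcode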
* * Example (this uses the same matrix as the example in * Tridiagonalization::Tridiagonalization(const MatrixType&)): * \include Tridiagonalization_decomposeInPlace.cpp * Output: \verbinclude Tridiagonalization_decomposeInPlace.out * * \sa class Tridiagonalization */ template void tridiagonalization_inplace(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ) { eigen_assert(mat.cols()==mat.rows() && diag.size()==mat.rows() && subdiag.size()==mat.rows()-1); tridiagonalization_inplace_selector::run(mat, diag, subdiag, extractQ); } /** \internal * General full tridiagonalization */ template struct tridiagonalization_inplace_selector { typedef typename Tridiagonalization::CoeffVectorType CoeffVectorType; typedef typename Tridiagonalization::HouseholderSequenceType HouseholderSequenceType; typedef typename MatrixType::Index Index; template static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ) { CoeffVectorType hCoeffs(mat.cols()-1); tridiagonalization_inplace(mat,hCoeffs); diag = mat.diagonal().real(); subdiag = mat.template diagonal<-1>().real(); if(extractQ) mat = HouseholderSequenceType(mat, hCoeffs.conjugate()) .setLength(mat.rows() - 1) .setShift(1); } }; /** \internal * Specialization for 3x3 real matrices. * Especially useful for plane fitting. */ template struct tridiagonalization_inplace_selector { typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; template static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ) { using std::sqrt; diag[0] = mat(0,0); RealScalar v1norm2 = numext::abs2(mat(2,0)); if(v1norm2 == RealScalar(0)) { diag[1] = mat(1,1); diag[2] = mat(2,2); subdiag[0] = mat(1,0); subdiag[1] = mat(2,1); if (extractQ) mat.setIdentity(); } else { RealScalar beta = sqrt(numext::abs2(mat(1,0)) + v1norm2); RealScalar invBeta = RealScalar(1)/beta; Scalar m01 = mat(1,0) * invBeta; Scalar m02 = mat(2,0) * invBeta; Scalar q = RealScalar(2)*m01*mat(2,1) + m02*(mat(2,2) - mat(1,1)); diag[1] = mat(1,1) + m02*q; diag[2] = mat(2,2) - m02*q; subdiag[0] = beta; subdiag[1] = mat(2,1) - m01 * q; if (extractQ) { mat << 1, 0, 0, 0, m01, m02, 0, m02, -m01; } } } }; /** \internal * Trivial specialization for 1x1 matrices */ template struct tridiagonalization_inplace_selector { typedef typename MatrixType::Scalar Scalar; template static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType&, bool extractQ) { diag(0,0) = numext::real(mat(0,0)); if(extractQ) mat(0,0) = Scalar(1); } }; /** \internal * \eigenvalues_module \ingroup Eigenvalues_Module * * \brief Expression type for return value of Tridiagonalization::matrixT() * * \tparam MatrixType type of underlying dense matrix */ template struct TridiagonalizationMatrixTReturnType : public ReturnByValue > { typedef typename MatrixType::Index Index; public: /** \brief Constructor. 
* * \param[in] mat The underlying dense matrix */ TridiagonalizationMatrixTReturnType(const MatrixType& mat) : m_matrix(mat) { } template inline void evalTo(ResultType& result) const { result.setZero(); result.template diagonal<1>() = m_matrix.template diagonal<-1>().conjugate(); result.diagonal() = m_matrix.diagonal(); result.template diagonal<-1>() = m_matrix.template diagonal<-1>(); } Index rows() const { return m_matrix.rows(); } Index cols() const { return m_matrix.cols(); } protected: typename MatrixType::Nested m_matrix; }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_TRIDIAGONALIZATION_H RcppEigen/inst/include/Eigen/src/Eigenvalues/EigenSolver.h0000644000176200001440000005252012550462273023220 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // Copyright (C) 2010,2012 Jitse Niesen // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_EIGENSOLVER_H #define EIGEN_EIGENSOLVER_H #include "./RealSchur.h" namespace Eigen { /** \eigenvalues_module \ingroup Eigenvalues_Module * * * \class EigenSolver * * \brief Computes eigenvalues and eigenvectors of general matrices * * \tparam _MatrixType the type of the matrix of which we are computing the * eigendecomposition; this is expected to be an instantiation of the Matrix * class template. Currently, only real matrices are supported. * * The eigenvalues and eigenvectors of a matrix \f$ A \f$ are scalars * \f$ \lambda \f$ and vectors \f$ v \f$ such that \f$ Av = \lambda v \f$. If * \f$ D \f$ is a diagonal matrix with the eigenvalues on the diagonal, and * \f$ V \f$ is a matrix with the eigenvectors as its columns, then \f$ A V = * V D \f$. The matrix \f$ V \f$ is almost always invertible, in which case we * have \f$ A = V D V^{-1} \f$. This is called the eigendecomposition. * * The eigenvalues and eigenvectors of a matrix may be complex, even when the * matrix is real. However, we can choose real matrices \f$ V \f$ and \f$ D * \f$ satisfying \f$ A V = V D \f$, just like the eigendecomposition, if the * matrix \f$ D \f$ is not required to be diagonal, but if it is allowed to * have blocks of the form * \f[ \begin{bmatrix} u & v \\ -v & u \end{bmatrix} \f] * (where \f$ u \f$ and \f$ v \f$ are real numbers) on the diagonal. These * blocks correspond to complex eigenvalue pairs \f$ u \pm iv \f$. We call * this variant of the eigendecomposition the pseudo-eigendecomposition. * * Call the function compute() to compute the eigenvalues and eigenvectors of * a given matrix. Alternatively, you can use the * EigenSolver(const MatrixType&, bool) constructor which computes the * eigenvalues and eigenvectors at construction time. Once the eigenvalue and * eigenvectors are computed, they can be retrieved with the eigenvalues() and * eigenvectors() functions. The pseudoEigenvalueMatrix() and * pseudoEigenvectors() methods allow the construction of the * pseudo-eigendecomposition. * * The documentation for EigenSolver(const MatrixType&, bool) contains an * example of the typical use of this class. * * \note The implementation is adapted from * JAMA (public domain). * Their code is based on EISPACK. 
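 *
 * A minimal sketch (illustrative only; \c A is assumed to be a square, possibly
 * non-symmetric \c MatrixXd):
 * \code
 * EigenSolver<MatrixXd> es(A);
 * VectorXcd lambda = es.eigenvalues();   // possibly complex
 * MatrixXcd V      = es.eigenvectors();  // mathematically, A * V = V * lambda.asDiagonal()
 * \endcode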
* * \sa MatrixBase::eigenvalues(), class ComplexEigenSolver, class SelfAdjointEigenSolver */ template class EigenSolver { public: /** \brief Synonym for the template parameter \p _MatrixType. */ typedef _MatrixType MatrixType; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, Options = MatrixType::Options, MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime }; /** \brief Scalar type for matrices of type #MatrixType. */ typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef typename MatrixType::Index Index; /** \brief Complex scalar type for #MatrixType. * * This is \c std::complex if #Scalar is real (e.g., * \c float or \c double) and just \c Scalar if #Scalar is * complex. */ typedef std::complex ComplexScalar; /** \brief Type for vector of eigenvalues as returned by eigenvalues(). * * This is a column vector with entries of type #ComplexScalar. * The length of the vector is the size of #MatrixType. */ typedef Matrix EigenvalueType; /** \brief Type for matrix of eigenvectors as returned by eigenvectors(). * * This is a square matrix with entries of type #ComplexScalar. * The size is the same as the size of #MatrixType. */ typedef Matrix EigenvectorsType; /** \brief Default constructor. * * The default constructor is useful in cases in which the user intends to * perform decompositions via EigenSolver::compute(const MatrixType&, bool). * * \sa compute() for an example. */ EigenSolver() : m_eivec(), m_eivalues(), m_isInitialized(false), m_realSchur(), m_matT(), m_tmp() {} /** \brief Default constructor with memory preallocation * * Like the default constructor but with preallocation of the internal data * according to the specified problem \a size. * \sa EigenSolver() */ EigenSolver(Index size) : m_eivec(size, size), m_eivalues(size), m_isInitialized(false), m_eigenvectorsOk(false), m_realSchur(size), m_matT(size, size), m_tmp(size) {} /** \brief Constructor; computes eigendecomposition of given matrix. * * \param[in] matrix Square matrix whose eigendecomposition is to be computed. * \param[in] computeEigenvectors If true, both the eigenvectors and the * eigenvalues are computed; if false, only the eigenvalues are * computed. * * This constructor calls compute() to compute the eigenvalues * and eigenvectors. * * Example: \include EigenSolver_EigenSolver_MatrixType.cpp * Output: \verbinclude EigenSolver_EigenSolver_MatrixType.out * * \sa compute() */ EigenSolver(const MatrixType& matrix, bool computeEigenvectors = true) : m_eivec(matrix.rows(), matrix.cols()), m_eivalues(matrix.cols()), m_isInitialized(false), m_eigenvectorsOk(false), m_realSchur(matrix.cols()), m_matT(matrix.rows(), matrix.cols()), m_tmp(matrix.cols()) { compute(matrix, computeEigenvectors); } /** \brief Returns the eigenvectors of given matrix. * * \returns %Matrix whose columns are the (possibly complex) eigenvectors. * * \pre Either the constructor * EigenSolver(const MatrixType&,bool) or the member function * compute(const MatrixType&, bool) has been called before, and * \p computeEigenvectors was set to true (the default). * * Column \f$ k \f$ of the returned matrix is an eigenvector corresponding * to eigenvalue number \f$ k \f$ as returned by eigenvalues(). The * eigenvectors are normalized to have (Euclidean) norm equal to one. The * matrix returned by this function is the matrix \f$ V \f$ in the * eigendecomposition \f$ A = V D V^{-1} \f$, if it exists. 
* * Example: \include EigenSolver_eigenvectors.cpp * Output: \verbinclude EigenSolver_eigenvectors.out * * \sa eigenvalues(), pseudoEigenvectors() */ EigenvectorsType eigenvectors() const; /** \brief Returns the pseudo-eigenvectors of given matrix. * * \returns Const reference to matrix whose columns are the pseudo-eigenvectors. * * \pre Either the constructor * EigenSolver(const MatrixType&,bool) or the member function * compute(const MatrixType&, bool) has been called before, and * \p computeEigenvectors was set to true (the default). * * The real matrix \f$ V \f$ returned by this function and the * block-diagonal matrix \f$ D \f$ returned by pseudoEigenvalueMatrix() * satisfy \f$ AV = VD \f$. * * Example: \include EigenSolver_pseudoEigenvectors.cpp * Output: \verbinclude EigenSolver_pseudoEigenvectors.out * * \sa pseudoEigenvalueMatrix(), eigenvectors() */ const MatrixType& pseudoEigenvectors() const { eigen_assert(m_isInitialized && "EigenSolver is not initialized."); eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues."); return m_eivec; } /** \brief Returns the block-diagonal matrix in the pseudo-eigendecomposition. * * \returns A block-diagonal matrix. * * \pre Either the constructor * EigenSolver(const MatrixType&,bool) or the member function * compute(const MatrixType&, bool) has been called before. * * The matrix \f$ D \f$ returned by this function is real and * block-diagonal. The blocks on the diagonal are either 1-by-1 or 2-by-2 * blocks of the form * \f$ \begin{bmatrix} u & v \\ -v & u \end{bmatrix} \f$. * These blocks are not sorted in any particular order. * The matrix \f$ D \f$ and the matrix \f$ V \f$ returned by * pseudoEigenvectors() satisfy \f$ AV = VD \f$. * * \sa pseudoEigenvectors() for an example, eigenvalues() */ MatrixType pseudoEigenvalueMatrix() const; /** \brief Returns the eigenvalues of given matrix. * * \returns A const reference to the column vector containing the eigenvalues. * * \pre Either the constructor * EigenSolver(const MatrixType&,bool) or the member function * compute(const MatrixType&, bool) has been called before. * * The eigenvalues are repeated according to their algebraic multiplicity, * so there are as many eigenvalues as rows in the matrix. The eigenvalues * are not sorted in any particular order. * * Example: \include EigenSolver_eigenvalues.cpp * Output: \verbinclude EigenSolver_eigenvalues.out * * \sa eigenvectors(), pseudoEigenvalueMatrix(), * MatrixBase::eigenvalues() */ const EigenvalueType& eigenvalues() const { eigen_assert(m_isInitialized && "EigenSolver is not initialized."); return m_eivalues; } /** \brief Computes eigendecomposition of given matrix. * * \param[in] matrix Square matrix whose eigendecomposition is to be computed. * \param[in] computeEigenvectors If true, both the eigenvectors and the * eigenvalues are computed; if false, only the eigenvalues are * computed. * \returns Reference to \c *this * * This function computes the eigenvalues of the real matrix \p matrix. * The eigenvalues() function can be used to retrieve them. If * \p computeEigenvectors is true, then the eigenvectors are also computed * and can be retrieved by calling eigenvectors(). * * The matrix is first reduced to real Schur form using the RealSchur * class. The Schur decomposition is then used to compute the eigenvalues * and eigenvectors. 
* * The cost of the computation is dominated by the cost of the * Schur decomposition, which is very approximately \f$ 25n^3 \f$ * (where \f$ n \f$ is the size of the matrix) if \p computeEigenvectors * is true, and \f$ 10n^3 \f$ if \p computeEigenvectors is false. * * This method reuses of the allocated data in the EigenSolver object. * * Example: \include EigenSolver_compute.cpp * Output: \verbinclude EigenSolver_compute.out */ EigenSolver& compute(const MatrixType& matrix, bool computeEigenvectors = true); ComputationInfo info() const { eigen_assert(m_isInitialized && "EigenSolver is not initialized."); return m_realSchur.info(); } /** \brief Sets the maximum number of iterations allowed. */ EigenSolver& setMaxIterations(Index maxIters) { m_realSchur.setMaxIterations(maxIters); return *this; } /** \brief Returns the maximum number of iterations. */ Index getMaxIterations() { return m_realSchur.getMaxIterations(); } private: void doComputeEigenvectors(); protected: static void check_template_parameters() { EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); EIGEN_STATIC_ASSERT(!NumTraits::IsComplex, NUMERIC_TYPE_MUST_BE_REAL); } MatrixType m_eivec; EigenvalueType m_eivalues; bool m_isInitialized; bool m_eigenvectorsOk; RealSchur m_realSchur; MatrixType m_matT; typedef Matrix ColumnVectorType; ColumnVectorType m_tmp; }; template MatrixType EigenSolver::pseudoEigenvalueMatrix() const { eigen_assert(m_isInitialized && "EigenSolver is not initialized."); Index n = m_eivalues.rows(); MatrixType matD = MatrixType::Zero(n,n); for (Index i=0; i(i,i) << numext::real(m_eivalues.coeff(i)), numext::imag(m_eivalues.coeff(i)), -numext::imag(m_eivalues.coeff(i)), numext::real(m_eivalues.coeff(i)); ++i; } } return matD; } template typename EigenSolver::EigenvectorsType EigenSolver::eigenvectors() const { eigen_assert(m_isInitialized && "EigenSolver is not initialized."); eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues."); Index n = m_eivec.cols(); EigenvectorsType matV(n,n); for (Index j=0; j(); matV.col(j).normalize(); } else { // we have a pair of complex eigen values for (Index i=0; i EigenSolver& EigenSolver::compute(const MatrixType& matrix, bool computeEigenvectors) { check_template_parameters(); using std::sqrt; using std::abs; eigen_assert(matrix.cols() == matrix.rows()); // Reduce to real Schur form. m_realSchur.compute(matrix, computeEigenvectors); if (m_realSchur.info() == Success) { m_matT = m_realSchur.matrixT(); if (computeEigenvectors) m_eivec = m_realSchur.matrixU(); // Compute eigenvalues from matT m_eivalues.resize(matrix.cols()); Index i = 0; while (i < matrix.cols()) { if (i == matrix.cols() - 1 || m_matT.coeff(i+1, i) == Scalar(0)) { m_eivalues.coeffRef(i) = m_matT.coeff(i, i); ++i; } else { Scalar p = Scalar(0.5) * (m_matT.coeff(i, i) - m_matT.coeff(i+1, i+1)); Scalar z = sqrt(abs(p * p + m_matT.coeff(i+1, i) * m_matT.coeff(i, i+1))); m_eivalues.coeffRef(i) = ComplexScalar(m_matT.coeff(i+1, i+1) + p, z); m_eivalues.coeffRef(i+1) = ComplexScalar(m_matT.coeff(i+1, i+1) + p, -z); i += 2; } } // Compute eigenvectors. if (computeEigenvectors) doComputeEigenvectors(); } m_isInitialized = true; m_eigenvectorsOk = computeEigenvectors; return *this; } // Complex scalar division. 
template std::complex cdiv(const Scalar& xr, const Scalar& xi, const Scalar& yr, const Scalar& yi) { using std::abs; Scalar r,d; if (abs(yr) > abs(yi)) { r = yi/yr; d = yr + r*yi; return std::complex((xr + r*xi)/d, (xi - r*xr)/d); } else { r = yr/yi; d = yi + r*yr; return std::complex((r*xr + xi)/d, (r*xi - xr)/d); } } template void EigenSolver::doComputeEigenvectors() { using std::abs; const Index size = m_eivec.cols(); const Scalar eps = NumTraits::epsilon(); // inefficient! this is already computed in RealSchur Scalar norm(0); for (Index j = 0; j < size; ++j) { norm += m_matT.row(j).segment((std::max)(j-1,Index(0)), size-(std::max)(j-1,Index(0))).cwiseAbs().sum(); } // Backsubstitute to find vectors of upper triangular form if (norm == 0.0) { return; } for (Index n = size-1; n >= 0; n--) { Scalar p = m_eivalues.coeff(n).real(); Scalar q = m_eivalues.coeff(n).imag(); // Scalar vector if (q == Scalar(0)) { Scalar lastr(0), lastw(0); Index l = n; m_matT.coeffRef(n,n) = 1.0; for (Index i = n-1; i >= 0; i--) { Scalar w = m_matT.coeff(i,i) - p; Scalar r = m_matT.row(i).segment(l,n-l+1).dot(m_matT.col(n).segment(l, n-l+1)); if (m_eivalues.coeff(i).imag() < 0.0) { lastw = w; lastr = r; } else { l = i; if (m_eivalues.coeff(i).imag() == 0.0) { if (w != 0.0) m_matT.coeffRef(i,n) = -r / w; else m_matT.coeffRef(i,n) = -r / (eps * norm); } else // Solve real equations { Scalar x = m_matT.coeff(i,i+1); Scalar y = m_matT.coeff(i+1,i); Scalar denom = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag(); Scalar t = (x * lastr - lastw * r) / denom; m_matT.coeffRef(i,n) = t; if (abs(x) > abs(lastw)) m_matT.coeffRef(i+1,n) = (-r - w * t) / x; else m_matT.coeffRef(i+1,n) = (-lastr - y * t) / lastw; } // Overflow control Scalar t = abs(m_matT.coeff(i,n)); if ((eps * t) * t > Scalar(1)) m_matT.col(n).tail(size-i) /= t; } } } else if (q < Scalar(0) && n > 0) // Complex vector { Scalar lastra(0), lastsa(0), lastw(0); Index l = n-1; // Last vector component imaginary so matrix is triangular if (abs(m_matT.coeff(n,n-1)) > abs(m_matT.coeff(n-1,n))) { m_matT.coeffRef(n-1,n-1) = q / m_matT.coeff(n,n-1); m_matT.coeffRef(n-1,n) = -(m_matT.coeff(n,n) - p) / m_matT.coeff(n,n-1); } else { std::complex cc = cdiv(0.0,-m_matT.coeff(n-1,n),m_matT.coeff(n-1,n-1)-p,q); m_matT.coeffRef(n-1,n-1) = numext::real(cc); m_matT.coeffRef(n-1,n) = numext::imag(cc); } m_matT.coeffRef(n,n-1) = 0.0; m_matT.coeffRef(n,n) = 1.0; for (Index i = n-2; i >= 0; i--) { Scalar ra = m_matT.row(i).segment(l, n-l+1).dot(m_matT.col(n-1).segment(l, n-l+1)); Scalar sa = m_matT.row(i).segment(l, n-l+1).dot(m_matT.col(n).segment(l, n-l+1)); Scalar w = m_matT.coeff(i,i) - p; if (m_eivalues.coeff(i).imag() < 0.0) { lastw = w; lastra = ra; lastsa = sa; } else { l = i; if (m_eivalues.coeff(i).imag() == RealScalar(0)) { std::complex cc = cdiv(-ra,-sa,w,q); m_matT.coeffRef(i,n-1) = numext::real(cc); m_matT.coeffRef(i,n) = numext::imag(cc); } else { // Solve complex equations Scalar x = m_matT.coeff(i,i+1); Scalar y = m_matT.coeff(i+1,i); Scalar vr = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag() - q * q; Scalar vi = (m_eivalues.coeff(i).real() - p) * Scalar(2) * q; if ((vr == 0.0) && (vi == 0.0)) vr = eps * norm * (abs(w) + abs(q) + abs(x) + abs(y) + abs(lastw)); std::complex cc = cdiv(x*lastra-lastw*ra+q*sa,x*lastsa-lastw*sa-q*ra,vr,vi); m_matT.coeffRef(i,n-1) = numext::real(cc); m_matT.coeffRef(i,n) = 
numext::imag(cc); if (abs(x) > (abs(lastw) + abs(q))) { m_matT.coeffRef(i+1,n-1) = (-ra - w * m_matT.coeff(i,n-1) + q * m_matT.coeff(i,n)) / x; m_matT.coeffRef(i+1,n) = (-sa - w * m_matT.coeff(i,n) - q * m_matT.coeff(i,n-1)) / x; } else { cc = cdiv(-lastra-y*m_matT.coeff(i,n-1),-lastsa-y*m_matT.coeff(i,n),lastw,q); m_matT.coeffRef(i+1,n-1) = numext::real(cc); m_matT.coeffRef(i+1,n) = numext::imag(cc); } } // Overflow control using std::max; Scalar t = (max)(abs(m_matT.coeff(i,n-1)),abs(m_matT.coeff(i,n))); if ((eps * t) * t > Scalar(1)) m_matT.block(i, n-1, size-i, 2) /= t; } } // We handled a pair of complex conjugate eigenvalues, so need to skip them both n--; } else { eigen_assert(0 && "Internal bug in EigenSolver"); // this should not happen } } // Back transformation to get eigenvectors of original matrix for (Index j = size-1; j >= 0; j--) { m_tmp.noalias() = m_eivec.leftCols(j+1) * m_matT.col(j).segment(0, j+1); m_eivec.col(j) = m_tmp; } } } // end namespace Eigen #endif // EIGEN_EIGENSOLVER_H RcppEigen/inst/include/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h0000644000176200001440000001305712253717461025231 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // Copyright (C) 2010 Jitse Niesen // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MATRIXBASEEIGENVALUES_H #define EIGEN_MATRIXBASEEIGENVALUES_H namespace Eigen { namespace internal { template struct eigenvalues_selector { // this is the implementation for the case IsComplex = true static inline typename MatrixBase::EigenvaluesReturnType const run(const MatrixBase& m) { typedef typename Derived::PlainObject PlainObject; PlainObject m_eval(m); return ComplexEigenSolver(m_eval, false).eigenvalues(); } }; template struct eigenvalues_selector { static inline typename MatrixBase::EigenvaluesReturnType const run(const MatrixBase& m) { typedef typename Derived::PlainObject PlainObject; PlainObject m_eval(m); return EigenSolver(m_eval, false).eigenvalues(); } }; } // end namespace internal /** \brief Computes the eigenvalues of a matrix * \returns Column vector containing the eigenvalues. * * \eigenvalues_module * This function computes the eigenvalues with the help of the EigenSolver * class (for real matrices) or the ComplexEigenSolver class (for complex * matrices). * * The eigenvalues are repeated according to their algebraic multiplicity, * so there are as many eigenvalues as rows in the matrix. * * The SelfAdjointView class provides a better algorithm for selfadjoint * matrices. * * Example: \include MatrixBase_eigenvalues.cpp * Output: \verbinclude MatrixBase_eigenvalues.out * * \sa EigenSolver::eigenvalues(), ComplexEigenSolver::eigenvalues(), * SelfAdjointView::eigenvalues() */ template inline typename MatrixBase::EigenvaluesReturnType MatrixBase::eigenvalues() const { typedef typename internal::traits::Scalar Scalar; return internal::eigenvalues_selector::IsComplex>::run(derived()); } /** \brief Computes the eigenvalues of a matrix * \returns Column vector containing the eigenvalues. * * \eigenvalues_module * This function computes the eigenvalues with the help of the * SelfAdjointEigenSolver class. The eigenvalues are repeated according to * their algebraic multiplicity, so there are as many eigenvalues as rows in * the matrix. 
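 *
 * As a small sketch (with \c M denoting any square \c MatrixXd supplied by
 * the caller), the self-adjoint path can be selected explicitly through a
 * view, in contrast to the general complex-valued overload above:
 * \code
 * Eigen::MatrixXd S = M + M.transpose();     // build a symmetric matrix
 * Eigen::VectorXd evSym = S.selfadjointView<Eigen::Lower>().eigenvalues();
 * Eigen::VectorXcd evGen = S.eigenvalues();  // general (complex) eigenvalues
 * \endcode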
* * Example: \include SelfAdjointView_eigenvalues.cpp * Output: \verbinclude SelfAdjointView_eigenvalues.out * * \sa SelfAdjointEigenSolver::eigenvalues(), MatrixBase::eigenvalues() */ template inline typename SelfAdjointView::EigenvaluesReturnType SelfAdjointView::eigenvalues() const { typedef typename SelfAdjointView::PlainObject PlainObject; PlainObject thisAsMatrix(*this); return SelfAdjointEigenSolver(thisAsMatrix, false).eigenvalues(); } /** \brief Computes the L2 operator norm * \returns Operator norm of the matrix. * * \eigenvalues_module * This function computes the L2 operator norm of a matrix, which is also * known as the spectral norm. The norm of a matrix \f$ A \f$ is defined to be * \f[ \|A\|_2 = \max_x \frac{\|Ax\|_2}{\|x\|_2} \f] * where the maximum is over all vectors and the norm on the right is the * Euclidean vector norm. The norm equals the largest singular value, which is * the square root of the largest eigenvalue of the positive semi-definite * matrix \f$ A^*A \f$. * * The current implementation uses the eigenvalues of \f$ A^*A \f$, as computed * by SelfAdjointView::eigenvalues(), to compute the operator norm of a * matrix. The SelfAdjointView class provides a better algorithm for * selfadjoint matrices. * * Example: \include MatrixBase_operatorNorm.cpp * Output: \verbinclude MatrixBase_operatorNorm.out * * \sa SelfAdjointView::eigenvalues(), SelfAdjointView::operatorNorm() */ template inline typename MatrixBase::RealScalar MatrixBase::operatorNorm() const { using std::sqrt; typename Derived::PlainObject m_eval(derived()); // FIXME if it is really guaranteed that the eigenvalues are already sorted, // then we don't need to compute a maxCoeff() here, comparing the 1st and last ones is enough. return sqrt((m_eval*m_eval.adjoint()) .eval() .template selfadjointView() .eigenvalues() .maxCoeff() ); } /** \brief Computes the L2 operator norm * \returns Operator norm of the matrix. * * \eigenvalues_module * This function computes the L2 operator norm of a self-adjoint matrix. For a * self-adjoint matrix, the operator norm is the largest eigenvalue. * * The current implementation uses the eigenvalues of the matrix, as computed * by eigenvalues(), to compute the operator norm of the matrix. * * Example: \include SelfAdjointView_operatorNorm.cpp * Output: \verbinclude SelfAdjointView_operatorNorm.out * * \sa eigenvalues(), MatrixBase::operatorNorm() */ template inline typename SelfAdjointView::RealScalar SelfAdjointView::operatorNorm() const { return eigenvalues().cwiseAbs().maxCoeff(); } } // end namespace Eigen #endif RcppEigen/inst/include/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h0000644000176200001440000002302212253717461027512 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2010 Gael Guennebaud // Copyright (C) 2010 Jitse Niesen // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_GENERALIZEDSELFADJOINTEIGENSOLVER_H #define EIGEN_GENERALIZEDSELFADJOINTEIGENSOLVER_H #include "./Tridiagonalization.h" namespace Eigen { /** \eigenvalues_module \ingroup Eigenvalues_Module * * * \class GeneralizedSelfAdjointEigenSolver * * \brief Computes eigenvalues and eigenvectors of the generalized selfadjoint eigen problem * * \tparam _MatrixType the type of the matrix of which we are computing the * eigendecomposition; this is expected to be an instantiation of the Matrix * class template. * * This class solves the generalized eigenvalue problem * \f$ Av = \lambda Bv \f$. In this case, the matrix \f$ A \f$ should be * selfadjoint and the matrix \f$ B \f$ should be positive definite. * * Only the \b lower \b triangular \b part of the input matrix is referenced. * * Call the function compute() to compute the eigenvalues and eigenvectors of * a given matrix. Alternatively, you can use the * GeneralizedSelfAdjointEigenSolver(const MatrixType&, const MatrixType&, int) * constructor which computes the eigenvalues and eigenvectors at construction time. * Once the eigenvalue and eigenvectors are computed, they can be retrieved with the eigenvalues() * and eigenvectors() functions. * * The documentation for GeneralizedSelfAdjointEigenSolver(const MatrixType&, const MatrixType&, int) * contains an example of the typical use of this class. * * \sa class SelfAdjointEigenSolver, class EigenSolver, class ComplexEigenSolver */ template class GeneralizedSelfAdjointEigenSolver : public SelfAdjointEigenSolver<_MatrixType> { typedef SelfAdjointEigenSolver<_MatrixType> Base; public: typedef typename Base::Index Index; typedef _MatrixType MatrixType; /** \brief Default constructor for fixed-size matrices. * * The default constructor is useful in cases in which the user intends to * perform decompositions via compute(). This constructor * can only be used if \p _MatrixType is a fixed-size matrix; use * GeneralizedSelfAdjointEigenSolver(Index) for dynamic-size matrices. */ GeneralizedSelfAdjointEigenSolver() : Base() {} /** \brief Constructor, pre-allocates memory for dynamic-size matrices. * * \param [in] size Positive integer, size of the matrix whose * eigenvalues and eigenvectors will be computed. * * This constructor is useful for dynamic-size matrices, when the user * intends to perform decompositions via compute(). The \p size * parameter is only used as a hint. It is not an error to give a wrong * \p size, but it may impair performance. * * \sa compute() for an example */ GeneralizedSelfAdjointEigenSolver(Index size) : Base(size) {} /** \brief Constructor; computes generalized eigendecomposition of given matrix pencil. * * \param[in] matA Selfadjoint matrix in matrix pencil. * Only the lower triangular part of the matrix is referenced. * \param[in] matB Positive-definite matrix in matrix pencil. * Only the lower triangular part of the matrix is referenced. * \param[in] options A or-ed set of flags {#ComputeEigenvectors,#EigenvaluesOnly} | {#Ax_lBx,#ABx_lx,#BAx_lx}. * Default is #ComputeEigenvectors|#Ax_lBx. * * This constructor calls compute(const MatrixType&, const MatrixType&, int) * to compute the eigenvalues and (if requested) the eigenvectors of the * generalized eigenproblem \f$ Ax = \lambda B x \f$ with \a matA the * selfadjoint matrix \f$ A \f$ and \a matB the positive definite matrix * \f$ B \f$. Each eigenvector \f$ x \f$ satisfies the property * \f$ x^* B x = 1 \f$. The eigenvectors are computed if * \a options contains ComputeEigenvectors. 
* * In addition, the two following variants can be solved via \p options: * - \c ABx_lx: \f$ ABx = \lambda x \f$ * - \c BAx_lx: \f$ BAx = \lambda x \f$ * * Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType2.cpp * Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType2.out * * \sa compute(const MatrixType&, const MatrixType&, int) */ GeneralizedSelfAdjointEigenSolver(const MatrixType& matA, const MatrixType& matB, int options = ComputeEigenvectors|Ax_lBx) : Base(matA.cols()) { compute(matA, matB, options); } /** \brief Computes generalized eigendecomposition of given matrix pencil. * * \param[in] matA Selfadjoint matrix in matrix pencil. * Only the lower triangular part of the matrix is referenced. * \param[in] matB Positive-definite matrix in matrix pencil. * Only the lower triangular part of the matrix is referenced. * \param[in] options A or-ed set of flags {#ComputeEigenvectors,#EigenvaluesOnly} | {#Ax_lBx,#ABx_lx,#BAx_lx}. * Default is #ComputeEigenvectors|#Ax_lBx. * * \returns Reference to \c *this * * Accoring to \p options, this function computes eigenvalues and (if requested) * the eigenvectors of one of the following three generalized eigenproblems: * - \c Ax_lBx: \f$ Ax = \lambda B x \f$ * - \c ABx_lx: \f$ ABx = \lambda x \f$ * - \c BAx_lx: \f$ BAx = \lambda x \f$ * with \a matA the selfadjoint matrix \f$ A \f$ and \a matB the positive definite * matrix \f$ B \f$. * In addition, each eigenvector \f$ x \f$ satisfies the property \f$ x^* B x = 1 \f$. * * The eigenvalues() function can be used to retrieve * the eigenvalues. If \p options contains ComputeEigenvectors, then the * eigenvectors are also computed and can be retrieved by calling * eigenvectors(). * * The implementation uses LLT to compute the Cholesky decomposition * \f$ B = LL^* \f$ and computes the classical eigendecomposition * of the selfadjoint matrix \f$ L^{-1} A (L^*)^{-1} \f$ if \p options contains Ax_lBx * and of \f$ L^{*} A L \f$ otherwise. This solves the * generalized eigenproblem, because any solution of the generalized * eigenproblem \f$ Ax = \lambda B x \f$ corresponds to a solution * \f$ L^{-1} A (L^*)^{-1} (L^* x) = \lambda (L^* x) \f$ of the * eigenproblem for \f$ L^{-1} A (L^*)^{-1} \f$. Similar statements * can be made for the two other variants. 
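 *
 * A minimal sketch of the default \c Ax_lBx case (with \c A selfadjoint and
 * \c B positive definite, both supplied by the caller):
 * \code
 * Eigen::GeneralizedSelfAdjointEigenSolver<Eigen::MatrixXd>
 *     ges(A, B, Eigen::ComputeEigenvectors | Eigen::Ax_lBx);
 * Eigen::VectorXd lambda = ges.eigenvalues();  // generalized eigenvalues
 * Eigen::MatrixXd X = ges.eigenvectors();      // columns x satisfy A x = lambda B x
 * \endcode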
* * Example: \include SelfAdjointEigenSolver_compute_MatrixType2.cpp * Output: \verbinclude SelfAdjointEigenSolver_compute_MatrixType2.out * * \sa GeneralizedSelfAdjointEigenSolver(const MatrixType&, const MatrixType&, int) */ GeneralizedSelfAdjointEigenSolver& compute(const MatrixType& matA, const MatrixType& matB, int options = ComputeEigenvectors|Ax_lBx); protected: }; template GeneralizedSelfAdjointEigenSolver& GeneralizedSelfAdjointEigenSolver:: compute(const MatrixType& matA, const MatrixType& matB, int options) { eigen_assert(matA.cols()==matA.rows() && matB.rows()==matA.rows() && matB.cols()==matB.rows()); eigen_assert((options&~(EigVecMask|GenEigMask))==0 && (options&EigVecMask)!=EigVecMask && ((options&GenEigMask)==0 || (options&GenEigMask)==Ax_lBx || (options&GenEigMask)==ABx_lx || (options&GenEigMask)==BAx_lx) && "invalid option parameter"); bool computeEigVecs = ((options&EigVecMask)==0) || ((options&EigVecMask)==ComputeEigenvectors); // Compute the cholesky decomposition of matB = L L' = U'U LLT cholB(matB); int type = (options&GenEigMask); if(type==0) type = Ax_lBx; if(type==Ax_lBx) { // compute C = inv(L) A inv(L') MatrixType matC = matA.template selfadjointView(); cholB.matrixL().template solveInPlace(matC); cholB.matrixU().template solveInPlace(matC); Base::compute(matC, computeEigVecs ? ComputeEigenvectors : EigenvaluesOnly ); // transform back the eigen vectors: evecs = inv(U) * evecs if(computeEigVecs) cholB.matrixU().solveInPlace(Base::m_eivec); } else if(type==ABx_lx) { // compute C = L' A L MatrixType matC = matA.template selfadjointView(); matC = matC * cholB.matrixL(); matC = cholB.matrixU() * matC; Base::compute(matC, computeEigVecs ? ComputeEigenvectors : EigenvaluesOnly); // transform back the eigen vectors: evecs = inv(U) * evecs if(computeEigVecs) cholB.matrixU().solveInPlace(Base::m_eivec); } else if(type==BAx_lx) { // compute C = L' A L MatrixType matC = matA.template selfadjointView(); matC = matC * cholB.matrixL(); matC = cholB.matrixU() * matC; Base::compute(matC, computeEigVecs ? ComputeEigenvectors : EigenvaluesOnly); // transform back the eigen vectors: evecs = L * evecs if(computeEigVecs) Base::m_eivec = cholB.matrixL() * Base::m_eivec; } return *this; } } // end namespace Eigen #endif // EIGEN_GENERALIZEDSELFADJOINTEIGENSOLVER_H RcppEigen/inst/include/Eigen/src/misc/0000755000176200001440000000000012253717461017307 5ustar liggesusersRcppEigen/inst/include/Eigen/src/misc/Solve.h0000644000176200001440000000466512253717461020563 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_MISC_SOLVE_H #define EIGEN_MISC_SOLVE_H namespace Eigen { namespace internal { /** \class solve_retval_base * */ template struct traits > { typedef typename DecompositionType::MatrixType MatrixType; typedef Matrix ReturnType; }; template struct solve_retval_base : public ReturnByValue > { typedef typename remove_all::type RhsNestedCleaned; typedef _DecompositionType DecompositionType; typedef ReturnByValue Base; typedef typename Base::Index Index; solve_retval_base(const DecompositionType& dec, const Rhs& rhs) : m_dec(dec), m_rhs(rhs) {} inline Index rows() const { return m_dec.cols(); } inline Index cols() const { return m_rhs.cols(); } inline const DecompositionType& dec() const { return m_dec; } inline const RhsNestedCleaned& rhs() const { return m_rhs; } template inline void evalTo(Dest& dst) const { static_cast*>(this)->evalTo(dst); } protected: const DecompositionType& m_dec; typename Rhs::Nested m_rhs; }; } // end namespace internal #define EIGEN_MAKE_SOLVE_HELPERS(DecompositionType,Rhs) \ typedef typename DecompositionType::MatrixType MatrixType; \ typedef typename MatrixType::Scalar Scalar; \ typedef typename MatrixType::RealScalar RealScalar; \ typedef typename MatrixType::Index Index; \ typedef Eigen::internal::solve_retval_base Base; \ using Base::dec; \ using Base::rhs; \ using Base::rows; \ using Base::cols; \ solve_retval(const DecompositionType& dec, const Rhs& rhs) \ : Base(dec, rhs) {} } // end namespace Eigen #endif // EIGEN_MISC_SOLVE_H RcppEigen/inst/include/Eigen/src/misc/Image.h0000644000176200001440000000566512253717461020516 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MISC_IMAGE_H #define EIGEN_MISC_IMAGE_H namespace Eigen { namespace internal { /** \class image_retval_base * */ template struct traits > { typedef typename DecompositionType::MatrixType MatrixType; typedef Matrix< typename MatrixType::Scalar, MatrixType::RowsAtCompileTime, // the image is a subspace of the destination space, whose // dimension is the number of rows of the original matrix Dynamic, // we don't know at compile time the dimension of the image (the rank) MatrixType::Options, MatrixType::MaxRowsAtCompileTime, // the image matrix will consist of columns from the original matrix, MatrixType::MaxColsAtCompileTime // so it has the same number of rows and at most as many columns. > ReturnType; }; template struct image_retval_base : public ReturnByValue > { typedef _DecompositionType DecompositionType; typedef typename DecompositionType::MatrixType MatrixType; typedef ReturnByValue Base; typedef typename Base::Index Index; image_retval_base(const DecompositionType& dec, const MatrixType& originalMatrix) : m_dec(dec), m_rank(dec.rank()), m_cols(m_rank == 0 ? 
1 : m_rank), m_originalMatrix(originalMatrix) {} inline Index rows() const { return m_dec.rows(); } inline Index cols() const { return m_cols; } inline Index rank() const { return m_rank; } inline const DecompositionType& dec() const { return m_dec; } inline const MatrixType& originalMatrix() const { return m_originalMatrix; } template inline void evalTo(Dest& dst) const { static_cast*>(this)->evalTo(dst); } protected: const DecompositionType& m_dec; Index m_rank, m_cols; const MatrixType& m_originalMatrix; }; } // end namespace internal #define EIGEN_MAKE_IMAGE_HELPERS(DecompositionType) \ typedef typename DecompositionType::MatrixType MatrixType; \ typedef typename MatrixType::Scalar Scalar; \ typedef typename MatrixType::RealScalar RealScalar; \ typedef typename MatrixType::Index Index; \ typedef Eigen::internal::image_retval_base Base; \ using Base::dec; \ using Base::originalMatrix; \ using Base::rank; \ using Base::rows; \ using Base::cols; \ image_retval(const DecompositionType& dec, const MatrixType& originalMatrix) \ : Base(dec, originalMatrix) {} } // end namespace Eigen #endif // EIGEN_MISC_IMAGE_H RcppEigen/inst/include/Eigen/src/misc/blas.h0000644000176200001440000010150312253717461020401 0ustar liggesusers#ifndef BLAS_H #define BLAS_H #ifdef __cplusplus extern "C" { #endif #define BLASFUNC(FUNC) FUNC##_ #ifdef __WIN64__ typedef long long BLASLONG; typedef unsigned long long BLASULONG; #else typedef long BLASLONG; typedef unsigned long BLASULONG; #endif int BLASFUNC(xerbla)(const char *, int *info, int); float BLASFUNC(sdot) (int *, float *, int *, float *, int *); float BLASFUNC(sdsdot)(int *, float *, float *, int *, float *, int *); double BLASFUNC(dsdot) (int *, float *, int *, float *, int *); double BLASFUNC(ddot) (int *, double *, int *, double *, int *); double BLASFUNC(qdot) (int *, double *, int *, double *, int *); int BLASFUNC(cdotuw) (int *, float *, int *, float *, int *, float*); int BLASFUNC(cdotcw) (int *, float *, int *, float *, int *, float*); int BLASFUNC(zdotuw) (int *, double *, int *, double *, int *, double*); int BLASFUNC(zdotcw) (int *, double *, int *, double *, int *, double*); int BLASFUNC(saxpy) (int *, float *, float *, int *, float *, int *); int BLASFUNC(daxpy) (int *, double *, double *, int *, double *, int *); int BLASFUNC(qaxpy) (int *, double *, double *, int *, double *, int *); int BLASFUNC(caxpy) (int *, float *, float *, int *, float *, int *); int BLASFUNC(zaxpy) (int *, double *, double *, int *, double *, int *); int BLASFUNC(xaxpy) (int *, double *, double *, int *, double *, int *); int BLASFUNC(caxpyc)(int *, float *, float *, int *, float *, int *); int BLASFUNC(zaxpyc)(int *, double *, double *, int *, double *, int *); int BLASFUNC(xaxpyc)(int *, double *, double *, int *, double *, int *); int BLASFUNC(scopy) (int *, float *, int *, float *, int *); int BLASFUNC(dcopy) (int *, double *, int *, double *, int *); int BLASFUNC(qcopy) (int *, double *, int *, double *, int *); int BLASFUNC(ccopy) (int *, float *, int *, float *, int *); int BLASFUNC(zcopy) (int *, double *, int *, double *, int *); int BLASFUNC(xcopy) (int *, double *, int *, double *, int *); int BLASFUNC(sswap) (int *, float *, int *, float *, int *); int BLASFUNC(dswap) (int *, double *, int *, double *, int *); int BLASFUNC(qswap) (int *, double *, int *, double *, int *); int BLASFUNC(cswap) (int *, float *, int *, float *, int *); int BLASFUNC(zswap) (int *, double *, int *, double *, int *); int BLASFUNC(xswap) (int *, double *, int *, double *, int *); 
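/* Illustrative sketch, not part of the original header: the prototypes above
   follow the Fortran calling convention, so every argument is passed by
   pointer and BLASFUNC(ddot) expands to the symbol ddot_.  Assuming the
   program is linked against a BLAS implementation, a level-1 call could be
   wrapped like this (kept out of the build on purpose): */
#if 0
static double example_ddot3(const double x[3], const double y[3])
{
    int n = 3, inc = 1;
    /* cast away const because the reference BLAS prototypes are not const-qualified */
    return BLASFUNC(ddot)(&n, (double *)x, &inc, (double *)y, &inc);
}
#endif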
float BLASFUNC(sasum) (int *, float *, int *); float BLASFUNC(scasum)(int *, float *, int *); double BLASFUNC(dasum) (int *, double *, int *); double BLASFUNC(qasum) (int *, double *, int *); double BLASFUNC(dzasum)(int *, double *, int *); double BLASFUNC(qxasum)(int *, double *, int *); int BLASFUNC(isamax)(int *, float *, int *); int BLASFUNC(idamax)(int *, double *, int *); int BLASFUNC(iqamax)(int *, double *, int *); int BLASFUNC(icamax)(int *, float *, int *); int BLASFUNC(izamax)(int *, double *, int *); int BLASFUNC(ixamax)(int *, double *, int *); int BLASFUNC(ismax) (int *, float *, int *); int BLASFUNC(idmax) (int *, double *, int *); int BLASFUNC(iqmax) (int *, double *, int *); int BLASFUNC(icmax) (int *, float *, int *); int BLASFUNC(izmax) (int *, double *, int *); int BLASFUNC(ixmax) (int *, double *, int *); int BLASFUNC(isamin)(int *, float *, int *); int BLASFUNC(idamin)(int *, double *, int *); int BLASFUNC(iqamin)(int *, double *, int *); int BLASFUNC(icamin)(int *, float *, int *); int BLASFUNC(izamin)(int *, double *, int *); int BLASFUNC(ixamin)(int *, double *, int *); int BLASFUNC(ismin)(int *, float *, int *); int BLASFUNC(idmin)(int *, double *, int *); int BLASFUNC(iqmin)(int *, double *, int *); int BLASFUNC(icmin)(int *, float *, int *); int BLASFUNC(izmin)(int *, double *, int *); int BLASFUNC(ixmin)(int *, double *, int *); float BLASFUNC(samax) (int *, float *, int *); double BLASFUNC(damax) (int *, double *, int *); double BLASFUNC(qamax) (int *, double *, int *); float BLASFUNC(scamax)(int *, float *, int *); double BLASFUNC(dzamax)(int *, double *, int *); double BLASFUNC(qxamax)(int *, double *, int *); float BLASFUNC(samin) (int *, float *, int *); double BLASFUNC(damin) (int *, double *, int *); double BLASFUNC(qamin) (int *, double *, int *); float BLASFUNC(scamin)(int *, float *, int *); double BLASFUNC(dzamin)(int *, double *, int *); double BLASFUNC(qxamin)(int *, double *, int *); float BLASFUNC(smax) (int *, float *, int *); double BLASFUNC(dmax) (int *, double *, int *); double BLASFUNC(qmax) (int *, double *, int *); float BLASFUNC(scmax) (int *, float *, int *); double BLASFUNC(dzmax) (int *, double *, int *); double BLASFUNC(qxmax) (int *, double *, int *); float BLASFUNC(smin) (int *, float *, int *); double BLASFUNC(dmin) (int *, double *, int *); double BLASFUNC(qmin) (int *, double *, int *); float BLASFUNC(scmin) (int *, float *, int *); double BLASFUNC(dzmin) (int *, double *, int *); double BLASFUNC(qxmin) (int *, double *, int *); int BLASFUNC(sscal) (int *, float *, float *, int *); int BLASFUNC(dscal) (int *, double *, double *, int *); int BLASFUNC(qscal) (int *, double *, double *, int *); int BLASFUNC(cscal) (int *, float *, float *, int *); int BLASFUNC(zscal) (int *, double *, double *, int *); int BLASFUNC(xscal) (int *, double *, double *, int *); int BLASFUNC(csscal)(int *, float *, float *, int *); int BLASFUNC(zdscal)(int *, double *, double *, int *); int BLASFUNC(xqscal)(int *, double *, double *, int *); float BLASFUNC(snrm2) (int *, float *, int *); float BLASFUNC(scnrm2)(int *, float *, int *); double BLASFUNC(dnrm2) (int *, double *, int *); double BLASFUNC(qnrm2) (int *, double *, int *); double BLASFUNC(dznrm2)(int *, double *, int *); double BLASFUNC(qxnrm2)(int *, double *, int *); int BLASFUNC(srot) (int *, float *, int *, float *, int *, float *, float *); int BLASFUNC(drot) (int *, double *, int *, double *, int *, double *, double *); int BLASFUNC(qrot) (int *, double *, int *, double *, int *, double *, 
double *); int BLASFUNC(csrot) (int *, float *, int *, float *, int *, float *, float *); int BLASFUNC(zdrot) (int *, double *, int *, double *, int *, double *, double *); int BLASFUNC(xqrot) (int *, double *, int *, double *, int *, double *, double *); int BLASFUNC(srotg) (float *, float *, float *, float *); int BLASFUNC(drotg) (double *, double *, double *, double *); int BLASFUNC(qrotg) (double *, double *, double *, double *); int BLASFUNC(crotg) (float *, float *, float *, float *); int BLASFUNC(zrotg) (double *, double *, double *, double *); int BLASFUNC(xrotg) (double *, double *, double *, double *); int BLASFUNC(srotmg)(float *, float *, float *, float *, float *); int BLASFUNC(drotmg)(double *, double *, double *, double *, double *); int BLASFUNC(srotm) (int *, float *, int *, float *, int *, float *); int BLASFUNC(drotm) (int *, double *, int *, double *, int *, double *); int BLASFUNC(qrotm) (int *, double *, int *, double *, int *, double *); /* Level 2 routines */ int BLASFUNC(sger)(int *, int *, float *, float *, int *, float *, int *, float *, int *); int BLASFUNC(dger)(int *, int *, double *, double *, int *, double *, int *, double *, int *); int BLASFUNC(qger)(int *, int *, double *, double *, int *, double *, int *, double *, int *); int BLASFUNC(cgeru)(int *, int *, float *, float *, int *, float *, int *, float *, int *); int BLASFUNC(cgerc)(int *, int *, float *, float *, int *, float *, int *, float *, int *); int BLASFUNC(zgeru)(int *, int *, double *, double *, int *, double *, int *, double *, int *); int BLASFUNC(zgerc)(int *, int *, double *, double *, int *, double *, int *, double *, int *); int BLASFUNC(xgeru)(int *, int *, double *, double *, int *, double *, int *, double *, int *); int BLASFUNC(xgerc)(int *, int *, double *, double *, int *, double *, int *, double *, int *); int BLASFUNC(sgemv)(char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(dgemv)(char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(qgemv)(char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(cgemv)(char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zgemv)(char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(xgemv)(char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(strsv) (char *, char *, char *, int *, float *, int *, float *, int *); int BLASFUNC(dtrsv) (char *, char *, char *, int *, double *, int *, double *, int *); int BLASFUNC(qtrsv) (char *, char *, char *, int *, double *, int *, double *, int *); int BLASFUNC(ctrsv) (char *, char *, char *, int *, float *, int *, float *, int *); int BLASFUNC(ztrsv) (char *, char *, char *, int *, double *, int *, double *, int *); int BLASFUNC(xtrsv) (char *, char *, char *, int *, double *, int *, double *, int *); int BLASFUNC(stpsv) (char *, char *, char *, int *, float *, float *, int *); int BLASFUNC(dtpsv) (char *, char *, char *, int *, double *, double *, int *); int BLASFUNC(qtpsv) (char *, char *, char *, int *, double *, double *, int *); int BLASFUNC(ctpsv) (char *, char *, char *, int *, float *, float *, int *); int BLASFUNC(ztpsv) (char *, char *, char *, int *, double *, double *, int *); int BLASFUNC(xtpsv) (char *, char *, char *, int *, double *, double *, int *); int BLASFUNC(strmv) (char *, char *, 
char *, int *, float *, int *, float *, int *); int BLASFUNC(dtrmv) (char *, char *, char *, int *, double *, int *, double *, int *); int BLASFUNC(qtrmv) (char *, char *, char *, int *, double *, int *, double *, int *); int BLASFUNC(ctrmv) (char *, char *, char *, int *, float *, int *, float *, int *); int BLASFUNC(ztrmv) (char *, char *, char *, int *, double *, int *, double *, int *); int BLASFUNC(xtrmv) (char *, char *, char *, int *, double *, int *, double *, int *); int BLASFUNC(stpmv) (char *, char *, char *, int *, float *, float *, int *); int BLASFUNC(dtpmv) (char *, char *, char *, int *, double *, double *, int *); int BLASFUNC(qtpmv) (char *, char *, char *, int *, double *, double *, int *); int BLASFUNC(ctpmv) (char *, char *, char *, int *, float *, float *, int *); int BLASFUNC(ztpmv) (char *, char *, char *, int *, double *, double *, int *); int BLASFUNC(xtpmv) (char *, char *, char *, int *, double *, double *, int *); int BLASFUNC(stbmv) (char *, char *, char *, int *, int *, float *, int *, float *, int *); int BLASFUNC(dtbmv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); int BLASFUNC(qtbmv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); int BLASFUNC(ctbmv) (char *, char *, char *, int *, int *, float *, int *, float *, int *); int BLASFUNC(ztbmv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); int BLASFUNC(xtbmv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); int BLASFUNC(stbsv) (char *, char *, char *, int *, int *, float *, int *, float *, int *); int BLASFUNC(dtbsv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); int BLASFUNC(qtbsv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); int BLASFUNC(ctbsv) (char *, char *, char *, int *, int *, float *, int *, float *, int *); int BLASFUNC(ztbsv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); int BLASFUNC(xtbsv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); int BLASFUNC(ssymv) (char *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(dsymv) (char *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(qsymv) (char *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(csymv) (char *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zsymv) (char *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(xsymv) (char *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(sspmv) (char *, int *, float *, float *, float *, int *, float *, float *, int *); int BLASFUNC(dspmv) (char *, int *, double *, double *, double *, int *, double *, double *, int *); int BLASFUNC(qspmv) (char *, int *, double *, double *, double *, int *, double *, double *, int *); int BLASFUNC(cspmv) (char *, int *, float *, float *, float *, int *, float *, float *, int *); int BLASFUNC(zspmv) (char *, int *, double *, double *, double *, int *, double *, double *, int *); int BLASFUNC(xspmv) (char *, int *, double *, double *, double *, int *, double *, double *, int *); int BLASFUNC(ssyr) (char *, int *, float *, float *, int *, float *, int *); int BLASFUNC(dsyr) (char *, int *, double *, double *, int *, double *, int *); int BLASFUNC(qsyr) (char *, int *, double *, double *, int *, double *, int *); int 
BLASFUNC(csyr) (char *, int *, float *, float *, int *, float *, int *); int BLASFUNC(zsyr) (char *, int *, double *, double *, int *, double *, int *); int BLASFUNC(xsyr) (char *, int *, double *, double *, int *, double *, int *); int BLASFUNC(ssyr2) (char *, int *, float *, float *, int *, float *, int *, float *, int *); int BLASFUNC(dsyr2) (char *, int *, double *, double *, int *, double *, int *, double *, int *); int BLASFUNC(qsyr2) (char *, int *, double *, double *, int *, double *, int *, double *, int *); int BLASFUNC(csyr2) (char *, int *, float *, float *, int *, float *, int *, float *, int *); int BLASFUNC(zsyr2) (char *, int *, double *, double *, int *, double *, int *, double *, int *); int BLASFUNC(xsyr2) (char *, int *, double *, double *, int *, double *, int *, double *, int *); int BLASFUNC(sspr) (char *, int *, float *, float *, int *, float *); int BLASFUNC(dspr) (char *, int *, double *, double *, int *, double *); int BLASFUNC(qspr) (char *, int *, double *, double *, int *, double *); int BLASFUNC(cspr) (char *, int *, float *, float *, int *, float *); int BLASFUNC(zspr) (char *, int *, double *, double *, int *, double *); int BLASFUNC(xspr) (char *, int *, double *, double *, int *, double *); int BLASFUNC(sspr2) (char *, int *, float *, float *, int *, float *, int *, float *); int BLASFUNC(dspr2) (char *, int *, double *, double *, int *, double *, int *, double *); int BLASFUNC(qspr2) (char *, int *, double *, double *, int *, double *, int *, double *); int BLASFUNC(cspr2) (char *, int *, float *, float *, int *, float *, int *, float *); int BLASFUNC(zspr2) (char *, int *, double *, double *, int *, double *, int *, double *); int BLASFUNC(xspr2) (char *, int *, double *, double *, int *, double *, int *, double *); int BLASFUNC(cher) (char *, int *, float *, float *, int *, float *, int *); int BLASFUNC(zher) (char *, int *, double *, double *, int *, double *, int *); int BLASFUNC(xher) (char *, int *, double *, double *, int *, double *, int *); int BLASFUNC(chpr) (char *, int *, float *, float *, int *, float *); int BLASFUNC(zhpr) (char *, int *, double *, double *, int *, double *); int BLASFUNC(xhpr) (char *, int *, double *, double *, int *, double *); int BLASFUNC(cher2) (char *, int *, float *, float *, int *, float *, int *, float *, int *); int BLASFUNC(zher2) (char *, int *, double *, double *, int *, double *, int *, double *, int *); int BLASFUNC(xher2) (char *, int *, double *, double *, int *, double *, int *, double *, int *); int BLASFUNC(chpr2) (char *, int *, float *, float *, int *, float *, int *, float *); int BLASFUNC(zhpr2) (char *, int *, double *, double *, int *, double *, int *, double *); int BLASFUNC(xhpr2) (char *, int *, double *, double *, int *, double *, int *, double *); int BLASFUNC(chemv) (char *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zhemv) (char *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(xhemv) (char *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(chpmv) (char *, int *, float *, float *, float *, int *, float *, float *, int *); int BLASFUNC(zhpmv) (char *, int *, double *, double *, double *, int *, double *, double *, int *); int BLASFUNC(xhpmv) (char *, int *, double *, double *, double *, int *, double *, double *, int *); int BLASFUNC(snorm)(char *, int *, int *, float *, int *); int BLASFUNC(dnorm)(char *, int *, int *, double *, int *); int 
BLASFUNC(cnorm)(char *, int *, int *, float *, int *); int BLASFUNC(znorm)(char *, int *, int *, double *, int *); int BLASFUNC(sgbmv)(char *, int *, int *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(dgbmv)(char *, int *, int *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(qgbmv)(char *, int *, int *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(cgbmv)(char *, int *, int *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zgbmv)(char *, int *, int *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(xgbmv)(char *, int *, int *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(ssbmv)(char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(dsbmv)(char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(qsbmv)(char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(csbmv)(char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zsbmv)(char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(xsbmv)(char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(chbmv)(char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zhbmv)(char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(xhbmv)(char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); /* Level 3 routines */ int BLASFUNC(sgemm)(char *, char *, int *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(dgemm)(char *, char *, int *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(qgemm)(char *, char *, int *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(cgemm)(char *, char *, int *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zgemm)(char *, char *, int *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(xgemm)(char *, char *, int *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(cgemm3m)(char *, char *, int *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zgemm3m)(char *, char *, int *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(xgemm3m)(char *, char *, int *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(sge2mm)(char *, char *, char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(dge2mm)(char *, char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(cge2mm)(char *, char *, char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zge2mm)(char *, char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, 
double *, int *); int BLASFUNC(strsm)(char *, char *, char *, char *, int *, int *, float *, float *, int *, float *, int *); int BLASFUNC(dtrsm)(char *, char *, char *, char *, int *, int *, double *, double *, int *, double *, int *); int BLASFUNC(qtrsm)(char *, char *, char *, char *, int *, int *, double *, double *, int *, double *, int *); int BLASFUNC(ctrsm)(char *, char *, char *, char *, int *, int *, float *, float *, int *, float *, int *); int BLASFUNC(ztrsm)(char *, char *, char *, char *, int *, int *, double *, double *, int *, double *, int *); int BLASFUNC(xtrsm)(char *, char *, char *, char *, int *, int *, double *, double *, int *, double *, int *); int BLASFUNC(strmm)(char *, char *, char *, char *, int *, int *, float *, float *, int *, float *, int *); int BLASFUNC(dtrmm)(char *, char *, char *, char *, int *, int *, double *, double *, int *, double *, int *); int BLASFUNC(qtrmm)(char *, char *, char *, char *, int *, int *, double *, double *, int *, double *, int *); int BLASFUNC(ctrmm)(char *, char *, char *, char *, int *, int *, float *, float *, int *, float *, int *); int BLASFUNC(ztrmm)(char *, char *, char *, char *, int *, int *, double *, double *, int *, double *, int *); int BLASFUNC(xtrmm)(char *, char *, char *, char *, int *, int *, double *, double *, int *, double *, int *); int BLASFUNC(ssymm)(char *, char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(dsymm)(char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(qsymm)(char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(csymm)(char *, char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zsymm)(char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(xsymm)(char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(csymm3m)(char *, char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zsymm3m)(char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(xsymm3m)(char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(ssyrk)(char *, char *, int *, int *, float *, float *, int *, float *, float *, int *); int BLASFUNC(dsyrk)(char *, char *, int *, int *, double *, double *, int *, double *, double *, int *); int BLASFUNC(qsyrk)(char *, char *, int *, int *, double *, double *, int *, double *, double *, int *); int BLASFUNC(csyrk)(char *, char *, int *, int *, float *, float *, int *, float *, float *, int *); int BLASFUNC(zsyrk)(char *, char *, int *, int *, double *, double *, int *, double *, double *, int *); int BLASFUNC(xsyrk)(char *, char *, int *, int *, double *, double *, int *, double *, double *, int *); int BLASFUNC(ssyr2k)(char *, char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(dsyr2k)(char *, char *, int *, int *, double *, double *, int *, double*, int *, double *, double *, int *); int BLASFUNC(qsyr2k)(char *, char *, int *, int *, double *, double *, int *, double*, int *, double *, double *, int *); int BLASFUNC(csyr2k)(char *, char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zsyr2k)(char 
*, char *, int *, int *, double *, double *, int *, double*, int *, double *, double *, int *); int BLASFUNC(xsyr2k)(char *, char *, int *, int *, double *, double *, int *, double*, int *, double *, double *, int *); int BLASFUNC(chemm)(char *, char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zhemm)(char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(xhemm)(char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(chemm3m)(char *, char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zhemm3m)(char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(xhemm3m)(char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(cherk)(char *, char *, int *, int *, float *, float *, int *, float *, float *, int *); int BLASFUNC(zherk)(char *, char *, int *, int *, double *, double *, int *, double *, double *, int *); int BLASFUNC(xherk)(char *, char *, int *, int *, double *, double *, int *, double *, double *, int *); int BLASFUNC(cher2k)(char *, char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zher2k)(char *, char *, int *, int *, double *, double *, int *, double*, int *, double *, double *, int *); int BLASFUNC(xher2k)(char *, char *, int *, int *, double *, double *, int *, double*, int *, double *, double *, int *); int BLASFUNC(cher2m)(char *, char *, char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zher2m)(char *, char *, char *, int *, int *, double *, double *, int *, double*, int *, double *, double *, int *); int BLASFUNC(xher2m)(char *, char *, char *, int *, int *, double *, double *, int *, double*, int *, double *, double *, int *); int BLASFUNC(sgemt)(char *, int *, int *, float *, float *, int *, float *, int *); int BLASFUNC(dgemt)(char *, int *, int *, double *, double *, int *, double *, int *); int BLASFUNC(cgemt)(char *, int *, int *, float *, float *, int *, float *, int *); int BLASFUNC(zgemt)(char *, int *, int *, double *, double *, int *, double *, int *); int BLASFUNC(sgema)(char *, char *, int *, int *, float *, float *, int *, float *, float *, int *, float *, int *); int BLASFUNC(dgema)(char *, char *, int *, int *, double *, double *, int *, double*, double *, int *, double*, int *); int BLASFUNC(cgema)(char *, char *, int *, int *, float *, float *, int *, float *, float *, int *, float *, int *); int BLASFUNC(zgema)(char *, char *, int *, int *, double *, double *, int *, double*, double *, int *, double*, int *); int BLASFUNC(sgems)(char *, char *, int *, int *, float *, float *, int *, float *, float *, int *, float *, int *); int BLASFUNC(dgems)(char *, char *, int *, int *, double *, double *, int *, double*, double *, int *, double*, int *); int BLASFUNC(cgems)(char *, char *, int *, int *, float *, float *, int *, float *, float *, int *, float *, int *); int BLASFUNC(zgems)(char *, char *, int *, int *, double *, double *, int *, double*, double *, int *, double*, int *); int BLASFUNC(sgetf2)(int *, int *, float *, int *, int *, int *); int BLASFUNC(dgetf2)(int *, int *, double *, int *, int *, int *); int BLASFUNC(qgetf2)(int *, int *, double *, int *, int *, int *); int BLASFUNC(cgetf2)(int *, int *, float *, int *, int *, 
int *); int BLASFUNC(zgetf2)(int *, int *, double *, int *, int *, int *); int BLASFUNC(xgetf2)(int *, int *, double *, int *, int *, int *); int BLASFUNC(sgetrf)(int *, int *, float *, int *, int *, int *); int BLASFUNC(dgetrf)(int *, int *, double *, int *, int *, int *); int BLASFUNC(qgetrf)(int *, int *, double *, int *, int *, int *); int BLASFUNC(cgetrf)(int *, int *, float *, int *, int *, int *); int BLASFUNC(zgetrf)(int *, int *, double *, int *, int *, int *); int BLASFUNC(xgetrf)(int *, int *, double *, int *, int *, int *); int BLASFUNC(slaswp)(int *, float *, int *, int *, int *, int *, int *); int BLASFUNC(dlaswp)(int *, double *, int *, int *, int *, int *, int *); int BLASFUNC(qlaswp)(int *, double *, int *, int *, int *, int *, int *); int BLASFUNC(claswp)(int *, float *, int *, int *, int *, int *, int *); int BLASFUNC(zlaswp)(int *, double *, int *, int *, int *, int *, int *); int BLASFUNC(xlaswp)(int *, double *, int *, int *, int *, int *, int *); int BLASFUNC(sgetrs)(char *, int *, int *, float *, int *, int *, float *, int *, int *); int BLASFUNC(dgetrs)(char *, int *, int *, double *, int *, int *, double *, int *, int *); int BLASFUNC(qgetrs)(char *, int *, int *, double *, int *, int *, double *, int *, int *); int BLASFUNC(cgetrs)(char *, int *, int *, float *, int *, int *, float *, int *, int *); int BLASFUNC(zgetrs)(char *, int *, int *, double *, int *, int *, double *, int *, int *); int BLASFUNC(xgetrs)(char *, int *, int *, double *, int *, int *, double *, int *, int *); int BLASFUNC(sgesv)(int *, int *, float *, int *, int *, float *, int *, int *); int BLASFUNC(dgesv)(int *, int *, double *, int *, int *, double*, int *, int *); int BLASFUNC(qgesv)(int *, int *, double *, int *, int *, double*, int *, int *); int BLASFUNC(cgesv)(int *, int *, float *, int *, int *, float *, int *, int *); int BLASFUNC(zgesv)(int *, int *, double *, int *, int *, double*, int *, int *); int BLASFUNC(xgesv)(int *, int *, double *, int *, int *, double*, int *, int *); int BLASFUNC(spotf2)(char *, int *, float *, int *, int *); int BLASFUNC(dpotf2)(char *, int *, double *, int *, int *); int BLASFUNC(qpotf2)(char *, int *, double *, int *, int *); int BLASFUNC(cpotf2)(char *, int *, float *, int *, int *); int BLASFUNC(zpotf2)(char *, int *, double *, int *, int *); int BLASFUNC(xpotf2)(char *, int *, double *, int *, int *); int BLASFUNC(spotrf)(char *, int *, float *, int *, int *); int BLASFUNC(dpotrf)(char *, int *, double *, int *, int *); int BLASFUNC(qpotrf)(char *, int *, double *, int *, int *); int BLASFUNC(cpotrf)(char *, int *, float *, int *, int *); int BLASFUNC(zpotrf)(char *, int *, double *, int *, int *); int BLASFUNC(xpotrf)(char *, int *, double *, int *, int *); int BLASFUNC(slauu2)(char *, int *, float *, int *, int *); int BLASFUNC(dlauu2)(char *, int *, double *, int *, int *); int BLASFUNC(qlauu2)(char *, int *, double *, int *, int *); int BLASFUNC(clauu2)(char *, int *, float *, int *, int *); int BLASFUNC(zlauu2)(char *, int *, double *, int *, int *); int BLASFUNC(xlauu2)(char *, int *, double *, int *, int *); int BLASFUNC(slauum)(char *, int *, float *, int *, int *); int BLASFUNC(dlauum)(char *, int *, double *, int *, int *); int BLASFUNC(qlauum)(char *, int *, double *, int *, int *); int BLASFUNC(clauum)(char *, int *, float *, int *, int *); int BLASFUNC(zlauum)(char *, int *, double *, int *, int *); int BLASFUNC(xlauum)(char *, int *, double *, int *, int *); int BLASFUNC(strti2)(char *, char *, int *, float *, int *, int *); int 
BLASFUNC(dtrti2)(char *, char *, int *, double *, int *, int *); int BLASFUNC(qtrti2)(char *, char *, int *, double *, int *, int *); int BLASFUNC(ctrti2)(char *, char *, int *, float *, int *, int *); int BLASFUNC(ztrti2)(char *, char *, int *, double *, int *, int *); int BLASFUNC(xtrti2)(char *, char *, int *, double *, int *, int *); int BLASFUNC(strtri)(char *, char *, int *, float *, int *, int *); int BLASFUNC(dtrtri)(char *, char *, int *, double *, int *, int *); int BLASFUNC(qtrtri)(char *, char *, int *, double *, int *, int *); int BLASFUNC(ctrtri)(char *, char *, int *, float *, int *, int *); int BLASFUNC(ztrtri)(char *, char *, int *, double *, int *, int *); int BLASFUNC(xtrtri)(char *, char *, int *, double *, int *, int *); int BLASFUNC(spotri)(char *, int *, float *, int *, int *); int BLASFUNC(dpotri)(char *, int *, double *, int *, int *); int BLASFUNC(qpotri)(char *, int *, double *, int *, int *); int BLASFUNC(cpotri)(char *, int *, float *, int *, int *); int BLASFUNC(zpotri)(char *, int *, double *, int *, int *); int BLASFUNC(xpotri)(char *, int *, double *, int *, int *); #ifdef __cplusplus } #endif #endif RcppEigen/inst/include/Eigen/src/misc/SparseSolve.h0000644000176200001440000001113112253717461021723 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSE_SOLVE_H #define EIGEN_SPARSE_SOLVE_H namespace Eigen { namespace internal { template struct sparse_solve_retval_base; template struct sparse_solve_retval; template struct traits > { typedef typename DecompositionType::MatrixType MatrixType; typedef SparseMatrix ReturnType; }; template struct sparse_solve_retval_base : public ReturnByValue > { typedef typename remove_all::type RhsNestedCleaned; typedef _DecompositionType DecompositionType; typedef ReturnByValue Base; typedef typename Base::Index Index; sparse_solve_retval_base(const DecompositionType& dec, const Rhs& rhs) : m_dec(dec), m_rhs(rhs) {} inline Index rows() const { return m_dec.cols(); } inline Index cols() const { return m_rhs.cols(); } inline const DecompositionType& dec() const { return m_dec; } inline const RhsNestedCleaned& rhs() const { return m_rhs; } template inline void evalTo(Dest& dst) const { static_cast*>(this)->evalTo(dst); } protected: template inline void defaultEvalTo(SparseMatrix& dst) const { // we process the sparse rhs per block of NbColsAtOnce columns temporarily stored into a dense matrix. 
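// Note (descriptive comment added for clarity): the decomposition's dense solve()
// is reused here -- up to NbColsAtOnce columns of the sparse rhs are copied into a
// dense temporary, solved, and the dense result is sparsified again via sparseView()
// before being written into the corresponding columns of dst.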
static const int NbColsAtOnce = 4; int rhsCols = m_rhs.cols(); int size = m_rhs.rows(); Eigen::Matrix tmp(size,rhsCols); Eigen::Matrix tmpX(size,rhsCols); for(int k=0; k(rhsCols-k, NbColsAtOnce); tmp.leftCols(actualCols) = m_rhs.middleCols(k,actualCols); tmpX.leftCols(actualCols) = m_dec.solve(tmp.leftCols(actualCols)); dst.middleCols(k,actualCols) = tmpX.leftCols(actualCols).sparseView(); } } const DecompositionType& m_dec; typename Rhs::Nested m_rhs; }; #define EIGEN_MAKE_SPARSE_SOLVE_HELPERS(DecompositionType,Rhs) \ typedef typename DecompositionType::MatrixType MatrixType; \ typedef typename MatrixType::Scalar Scalar; \ typedef typename MatrixType::RealScalar RealScalar; \ typedef typename MatrixType::Index Index; \ typedef Eigen::internal::sparse_solve_retval_base Base; \ using Base::dec; \ using Base::rhs; \ using Base::rows; \ using Base::cols; \ sparse_solve_retval(const DecompositionType& dec, const Rhs& rhs) \ : Base(dec, rhs) {} template struct solve_retval_with_guess; template struct traits > { typedef typename DecompositionType::MatrixType MatrixType; typedef Matrix ReturnType; }; template struct solve_retval_with_guess : public ReturnByValue > { typedef typename DecompositionType::Index Index; solve_retval_with_guess(const DecompositionType& dec, const Rhs& rhs, const Guess& guess) : m_dec(dec), m_rhs(rhs), m_guess(guess) {} inline Index rows() const { return m_dec.cols(); } inline Index cols() const { return m_rhs.cols(); } template inline void evalTo(Dest& dst) const { dst = m_guess; m_dec._solveWithGuess(m_rhs,dst); } protected: const DecompositionType& m_dec; const typename Rhs::Nested m_rhs; const typename Guess::Nested m_guess; }; } // namepsace internal } // end namespace Eigen #endif // EIGEN_SPARSE_SOLVE_H RcppEigen/inst/include/Eigen/src/misc/Kernel.h0000644000176200001440000000540112253717461020700 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MISC_KERNEL_H #define EIGEN_MISC_KERNEL_H namespace Eigen { namespace internal { /** \class kernel_retval_base * */ template struct traits > { typedef typename DecompositionType::MatrixType MatrixType; typedef Matrix< typename MatrixType::Scalar, MatrixType::ColsAtCompileTime, // the number of rows in the "kernel matrix" // is the number of cols of the original matrix // so that the product "matrix * kernel = zero" makes sense Dynamic, // we don't know at compile-time the dimension of the kernel MatrixType::Options, MatrixType::MaxColsAtCompileTime, // see explanation for 2nd template parameter MatrixType::MaxColsAtCompileTime // the kernel is a subspace of the domain space, // whose dimension is the number of columns of the original matrix > ReturnType; }; template struct kernel_retval_base : public ReturnByValue > { typedef _DecompositionType DecompositionType; typedef ReturnByValue Base; typedef typename Base::Index Index; kernel_retval_base(const DecompositionType& dec) : m_dec(dec), m_rank(dec.rank()), m_cols(m_rank==dec.cols() ? 
1 : dec.cols() - m_rank) {} inline Index rows() const { return m_dec.cols(); } inline Index cols() const { return m_cols; } inline Index rank() const { return m_rank; } inline const DecompositionType& dec() const { return m_dec; } template inline void evalTo(Dest& dst) const { static_cast*>(this)->evalTo(dst); } protected: const DecompositionType& m_dec; Index m_rank, m_cols; }; } // end namespace internal #define EIGEN_MAKE_KERNEL_HELPERS(DecompositionType) \ typedef typename DecompositionType::MatrixType MatrixType; \ typedef typename MatrixType::Scalar Scalar; \ typedef typename MatrixType::RealScalar RealScalar; \ typedef typename MatrixType::Index Index; \ typedef Eigen::internal::kernel_retval_base Base; \ using Base::dec; \ using Base::rank; \ using Base::rows; \ using Base::cols; \ kernel_retval(const DecompositionType& dec) : Base(dec) {} } // end namespace Eigen #endif // EIGEN_MISC_KERNEL_H RcppEigen/inst/include/Eigen/src/SparseCore/0000755000176200001440000000000012647570720020424 5ustar liggesusersRcppEigen/inst/include/Eigen/src/SparseCore/SparseView.h0000644000176200001440000000535712501315103022654 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2011 Gael Guennebaud // Copyright (C) 2010 Daniel Lowengrub // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSEVIEW_H #define EIGEN_SPARSEVIEW_H namespace Eigen { namespace internal { template struct traits > : traits { typedef typename MatrixType::Index Index; typedef Sparse StorageKind; enum { Flags = int(traits::Flags) & (RowMajorBit) }; }; } // end namespace internal template class SparseView : public SparseMatrixBase > { typedef typename MatrixType::Nested MatrixTypeNested; typedef typename internal::remove_all::type _MatrixTypeNested; public: EIGEN_SPARSE_PUBLIC_INTERFACE(SparseView) SparseView(const MatrixType& mat, const Scalar& m_reference = Scalar(0), typename NumTraits::Real m_epsilon = NumTraits::dummy_precision()) : m_matrix(mat), m_reference(m_reference), m_epsilon(m_epsilon) {} class InnerIterator; inline Index rows() const { return m_matrix.rows(); } inline Index cols() const { return m_matrix.cols(); } inline Index innerSize() const { return m_matrix.innerSize(); } inline Index outerSize() const { return m_matrix.outerSize(); } protected: MatrixTypeNested m_matrix; Scalar m_reference; typename NumTraits::Real m_epsilon; }; template class SparseView::InnerIterator : public _MatrixTypeNested::InnerIterator { typedef typename SparseView::Index Index; public: typedef typename _MatrixTypeNested::InnerIterator IterBase; InnerIterator(const SparseView& view, Index outer) : IterBase(view.m_matrix, outer), m_view(view) { incrementToNonZero(); } EIGEN_STRONG_INLINE InnerIterator& operator++() { IterBase::operator++(); incrementToNonZero(); return *this; } using IterBase::value; protected: const SparseView& m_view; private: void incrementToNonZero() { while((bool(*this)) && internal::isMuchSmallerThan(value(), m_view.m_reference, m_view.m_epsilon)) { IterBase::operator++(); } } }; template const SparseView MatrixBase::sparseView(const Scalar& m_reference, const typename NumTraits::Real& m_epsilon) const { return SparseView(derived(), m_reference, m_epsilon); } } // end namespace Eigen #endif 
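// --- Illustrative usage sketch (added; not part of the Eigen sources above) ---
// MatrixBase::sparseView(reference, epsilon), defined in SparseView.h above, skips
// every coefficient that is "much smaller than" the reference value and is the usual
// way to thin a dense matrix into a SparseMatrix.  A minimal, hypothetical example,
// guarded with #if 0 because it is only an illustration:
#if 0
#include <Eigen/Dense>
#include <Eigen/SparseCore>
#include <iostream>

int main() {
    Eigen::MatrixXd dense(2, 3);
    dense << 1.0, 1e-12, 0.0,
             0.0, 2.0,   3.0;

    // With reference = 1.0 and epsilon = 1e-6, entries whose magnitude falls below
    // 1.0 * 1e-6 (the 1e-12 term and the exact zeros) are not stored.
    Eigen::SparseMatrix<double> sparse = dense.sparseView(1.0, 1e-6);
    std::cout << sparse.nonZeros() << std::endl;   // expected: 3
    return 0;
}
#endif
// -------------------------------------------------------------------------------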
RcppEigen/inst/include/Eigen/src/SparseCore/CompressedStorage.h0000644000176200001440000001506312647570720024233 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_COMPRESSED_STORAGE_H #define EIGEN_COMPRESSED_STORAGE_H namespace Eigen { namespace internal { /** \internal * Stores a sparse set of values as a list of values and a list of indices. * */ template class CompressedStorage { public: typedef _Scalar Scalar; typedef _Index Index; protected: typedef typename NumTraits::Real RealScalar; public: CompressedStorage() : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0) {} CompressedStorage(size_t size) : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0) { resize(size); } CompressedStorage(const CompressedStorage& other) : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0) { *this = other; } CompressedStorage& operator=(const CompressedStorage& other) { resize(other.size()); internal::smart_copy(other.m_values, other.m_values + m_size, m_values); internal::smart_copy(other.m_indices, other.m_indices + m_size, m_indices); return *this; } void swap(CompressedStorage& other) { std::swap(m_values, other.m_values); std::swap(m_indices, other.m_indices); std::swap(m_size, other.m_size); std::swap(m_allocatedSize, other.m_allocatedSize); } ~CompressedStorage() { delete[] m_values; delete[] m_indices; } void reserve(size_t size) { size_t newAllocatedSize = m_size + size; if (newAllocatedSize > m_allocatedSize) reallocate(newAllocatedSize); } void squeeze() { if (m_allocatedSize>m_size) reallocate(m_size); } void resize(size_t size, double reserveSizeFactor = 0) { if (m_allocatedSize(m_size); resize(m_size+1, 1); m_values[id] = v; m_indices[id] = i; } inline size_t size() const { return m_size; } inline size_t allocatedSize() const { return m_allocatedSize; } inline void clear() { m_size = 0; } inline Scalar& value(size_t i) { return m_values[i]; } inline const Scalar& value(size_t i) const { return m_values[i]; } inline Index& index(size_t i) { return m_indices[i]; } inline const Index& index(size_t i) const { return m_indices[i]; } static CompressedStorage Map(Index* indices, Scalar* values, size_t size) { CompressedStorage res; res.m_indices = indices; res.m_values = values; res.m_allocatedSize = res.m_size = size; return res; } /** \returns the largest \c k such that for all \c j in [0,k) index[\c j]\<\a key */ inline Index searchLowerIndex(Index key) const { return searchLowerIndex(0, m_size, key); } /** \returns the largest \c k in [start,end) such that for all \c j in [start,k) index[\c j]\<\a key */ inline Index searchLowerIndex(size_t start, size_t end, Index key) const { while(end>start) { size_t mid = (end+start)>>1; if (m_indices[mid](start); } /** \returns the stored value at index \a key * If the value does not exist, then the value \a defaultValue is returned without any insertion. 
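* (For instance, \c at(key) on an empty storage simply returns \c Scalar(0), the default.)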
*/ inline Scalar at(Index key, const Scalar& defaultValue = Scalar(0)) const { if (m_size==0) return defaultValue; else if (key==m_indices[m_size-1]) return m_values[m_size-1]; // ^^ optimization: let's first check if it is the last coefficient // (very common in high level algorithms) const size_t id = searchLowerIndex(0,m_size-1,key); return ((id=end) return Scalar(0); else if (end>start && key==m_indices[end-1]) return m_values[end-1]; // ^^ optimization: let's first check if it is the last coefficient // (very common in high level algorithms) const size_t id = searchLowerIndex(start,end-1,key); return ((id=m_size || m_indices[id]!=key) { resize(m_size+1,1); for (size_t j=m_size-1; j>id; --j) { m_indices[j] = m_indices[j-1]; m_values[j] = m_values[j-1]; } m_indices[id] = key; m_values[id] = defaultValue; } return m_values[id]; } void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits::dummy_precision()) { size_t k = 0; size_t n = size(); for (size_t i=0; i // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSE_BLOCK_H #define EIGEN_SPARSE_BLOCK_H namespace Eigen { template class BlockImpl : public SparseMatrixBase > { typedef typename internal::remove_all::type _MatrixTypeNested; typedef Block BlockType; public: enum { IsRowMajor = internal::traits::IsRowMajor }; protected: enum { OuterSize = IsRowMajor ? BlockRows : BlockCols }; public: EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType) class InnerIterator: public XprType::InnerIterator { typedef typename BlockImpl::Index Index; public: inline InnerIterator(const BlockType& xpr, Index outer) : XprType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) {} inline Index row() const { return IsRowMajor ? m_outer : this->index(); } inline Index col() const { return IsRowMajor ? this->index() : m_outer; } protected: Index m_outer; }; class ReverseInnerIterator: public XprType::ReverseInnerIterator { typedef typename BlockImpl::Index Index; public: inline ReverseInnerIterator(const BlockType& xpr, Index outer) : XprType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) {} inline Index row() const { return IsRowMajor ? m_outer : this->index(); } inline Index col() const { return IsRowMajor ? this->index() : m_outer; } protected: Index m_outer; }; inline BlockImpl(const XprType& xpr, int i) : m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize) {} inline BlockImpl(const XprType& xpr, int startRow, int startCol, int blockRows, int blockCols) : m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols) {} inline const Scalar coeff(int row, int col) const { return m_matrix.coeff(row + IsRowMajor ? m_outerStart : 0, col +IsRowMajor ? 0 : m_outerStart); } inline const Scalar coeff(int index) const { return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart); } EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? 
m_matrix.cols() : m_outerSize.value(); } protected: typename XprType::Nested m_matrix; Index m_outerStart; const internal::variable_if_dynamic m_outerSize; EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl) private: Index nonZeros() const; }; /*************************************************************************** * specialisation for SparseMatrix ***************************************************************************/ template class BlockImpl,BlockRows,BlockCols,true,Sparse> : public SparseMatrixBase,BlockRows,BlockCols,true> > { typedef SparseMatrix<_Scalar, _Options, _Index> SparseMatrixType; typedef typename internal::remove_all::type _MatrixTypeNested; typedef Block BlockType; typedef Block ConstBlockType; public: enum { IsRowMajor = internal::traits::IsRowMajor }; EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType) protected: enum { OuterSize = IsRowMajor ? BlockRows : BlockCols }; public: class InnerIterator: public SparseMatrixType::InnerIterator { public: inline InnerIterator(const BlockType& xpr, Index outer) : SparseMatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) {} inline Index row() const { return IsRowMajor ? m_outer : this->index(); } inline Index col() const { return IsRowMajor ? this->index() : m_outer; } protected: Index m_outer; }; class ReverseInnerIterator: public SparseMatrixType::ReverseInnerIterator { public: inline ReverseInnerIterator(const BlockType& xpr, Index outer) : SparseMatrixType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) {} inline Index row() const { return IsRowMajor ? m_outer : this->index(); } inline Index col() const { return IsRowMajor ? this->index() : m_outer; } protected: Index m_outer; }; inline BlockImpl(const SparseMatrixType& xpr, int i) : m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize) {} inline BlockImpl(const SparseMatrixType& xpr, int startRow, int startCol, int blockRows, int blockCols) : m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols) {} template inline BlockType& operator=(const SparseMatrixBase& other) { typedef typename internal::remove_all::type _NestedMatrixType; _NestedMatrixType& matrix = const_cast<_NestedMatrixType&>(m_matrix);; // This assignement is slow if this vector set is not empty // and/or it is not at the end of the nonzeros of the underlying matrix. // 1 - eval to a temporary to avoid transposition and/or aliasing issues SparseMatrix tmp(other); // 2 - let's check whether there is enough allocated memory Index nnz = tmp.nonZeros(); Index start = m_outerStart==0 ? 0 : matrix.outerIndexPtr()[m_outerStart]; // starting position of the current block Index end = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]; // ending posiiton of the current block Index block_size = end - start; // available room in the current block Index tail_size = m_matrix.outerIndexPtr()[m_matrix.outerSize()] - end; Index free_size = m_matrix.isCompressed() ? 
Index(matrix.data().allocatedSize()) + block_size : block_size; if(nnz>free_size) { // realloc manually to reduce copies typename SparseMatrixType::Storage newdata(m_matrix.data().allocatedSize() - block_size + nnz); std::memcpy(&newdata.value(0), &m_matrix.data().value(0), start*sizeof(Scalar)); std::memcpy(&newdata.index(0), &m_matrix.data().index(0), start*sizeof(Index)); std::memcpy(&newdata.value(start), &tmp.data().value(0), nnz*sizeof(Scalar)); std::memcpy(&newdata.index(start), &tmp.data().index(0), nnz*sizeof(Index)); std::memcpy(&newdata.value(start+nnz), &matrix.data().value(end), tail_size*sizeof(Scalar)); std::memcpy(&newdata.index(start+nnz), &matrix.data().index(end), tail_size*sizeof(Index)); newdata.resize(m_matrix.outerIndexPtr()[m_matrix.outerSize()] - block_size + nnz); matrix.data().swap(newdata); } else { // no need to realloc, simply copy the tail at its respective position and insert tmp matrix.data().resize(start + nnz + tail_size); std::memmove(&matrix.data().value(start+nnz), &matrix.data().value(end), tail_size*sizeof(Scalar)); std::memmove(&matrix.data().index(start+nnz), &matrix.data().index(end), tail_size*sizeof(Index)); std::memcpy(&matrix.data().value(start), &tmp.data().value(0), nnz*sizeof(Scalar)); std::memcpy(&matrix.data().index(start), &tmp.data().index(0), nnz*sizeof(Index)); } // update innerNonZeros if(!m_matrix.isCompressed()) for(Index j=0; j(other); } inline const Scalar* valuePtr() const { return m_matrix.valuePtr() + m_matrix.outerIndexPtr()[m_outerStart]; } inline Scalar* valuePtr() { return m_matrix.const_cast_derived().valuePtr() + m_matrix.outerIndexPtr()[m_outerStart]; } inline const Index* innerIndexPtr() const { return m_matrix.innerIndexPtr() + m_matrix.outerIndexPtr()[m_outerStart]; } inline Index* innerIndexPtr() { return m_matrix.const_cast_derived().innerIndexPtr() + m_matrix.outerIndexPtr()[m_outerStart]; } inline const Index* outerIndexPtr() const { return m_matrix.outerIndexPtr() + m_outerStart; } inline Index* outerIndexPtr() { return m_matrix.const_cast_derived().outerIndexPtr() + m_outerStart; } Index nonZeros() const { if(m_matrix.isCompressed()) return std::size_t(m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]) - std::size_t(m_matrix.outerIndexPtr()[m_outerStart]); else if(m_outerSize.value()==0) return 0; else return Map >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum(); } inline Scalar& coeffRef(int row, int col) { return m_matrix.const_cast_derived().coeffRef(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart)); } inline const Scalar coeff(int row, int col) const { return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart)); } inline const Scalar coeff(int index) const { return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart); } const Scalar& lastCoeff() const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(BlockImpl); eigen_assert(nonZeros()>0); if(m_matrix.isCompressed()) return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart+1]-1]; else return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1]; } EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? 
m_matrix.cols() : m_outerSize.value(); } protected: typename SparseMatrixType::Nested m_matrix; Index m_outerStart; const internal::variable_if_dynamic m_outerSize; }; template class BlockImpl,BlockRows,BlockCols,true,Sparse> : public SparseMatrixBase,BlockRows,BlockCols,true> > { typedef SparseMatrix<_Scalar, _Options, _Index> SparseMatrixType; typedef typename internal::remove_all::type _MatrixTypeNested; typedef Block BlockType; public: enum { IsRowMajor = internal::traits::IsRowMajor }; EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType) protected: enum { OuterSize = IsRowMajor ? BlockRows : BlockCols }; public: class InnerIterator: public SparseMatrixType::InnerIterator { public: inline InnerIterator(const BlockType& xpr, Index outer) : SparseMatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) {} inline Index row() const { return IsRowMajor ? m_outer : this->index(); } inline Index col() const { return IsRowMajor ? this->index() : m_outer; } protected: Index m_outer; }; class ReverseInnerIterator: public SparseMatrixType::ReverseInnerIterator { public: inline ReverseInnerIterator(const BlockType& xpr, Index outer) : SparseMatrixType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) {} inline Index row() const { return IsRowMajor ? m_outer : this->index(); } inline Index col() const { return IsRowMajor ? this->index() : m_outer; } protected: Index m_outer; }; inline BlockImpl(const SparseMatrixType& xpr, int i) : m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize) {} inline BlockImpl(const SparseMatrixType& xpr, int startRow, int startCol, int blockRows, int blockCols) : m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols) {} inline const Scalar* valuePtr() const { return m_matrix.valuePtr() + m_matrix.outerIndexPtr()[m_outerStart]; } inline const Index* innerIndexPtr() const { return m_matrix.innerIndexPtr() + m_matrix.outerIndexPtr()[m_outerStart]; } inline const Index* outerIndexPtr() const { return m_matrix.outerIndexPtr() + m_outerStart; } Index nonZeros() const { if(m_matrix.isCompressed()) return std::size_t(m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]) - std::size_t(m_matrix.outerIndexPtr()[m_outerStart]); else if(m_outerSize.value()==0) return 0; else return Map >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum(); } inline const Scalar coeff(int row, int col) const { return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart)); } inline const Scalar coeff(int index) const { return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart); } const Scalar& lastCoeff() const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(BlockImpl); eigen_assert(nonZeros()>0); if(m_matrix.isCompressed()) return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart+1]-1]; else return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1]; } EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } protected: typename SparseMatrixType::Nested m_matrix; Index m_outerStart; const internal::variable_if_dynamic m_outerSize; }; //---------- /** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this * is col-major (resp. row-major). 
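* For a column-major SparseMatrix, for instance, \c innerVector(j) is a view of column \c j.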
*/ template typename SparseMatrixBase::InnerVectorReturnType SparseMatrixBase::innerVector(Index outer) { return InnerVectorReturnType(derived(), outer); } /** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this * is col-major (resp. row-major). Read-only. */ template const typename SparseMatrixBase::ConstInnerVectorReturnType SparseMatrixBase::innerVector(Index outer) const { return ConstInnerVectorReturnType(derived(), outer); } /** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this * is col-major (resp. row-major). */ template typename SparseMatrixBase::InnerVectorsReturnType SparseMatrixBase::innerVectors(Index outerStart, Index outerSize) { return Block(derived(), IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart, IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize); } /** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this * is col-major (resp. row-major). Read-only. */ template const typename SparseMatrixBase::ConstInnerVectorsReturnType SparseMatrixBase::innerVectors(Index outerStart, Index outerSize) const { return Block(derived(), IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart, IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize); } /** Generic implementation of sparse Block expression. * Real-only. */ template class BlockImpl : public SparseMatrixBase >, internal::no_assignment_operator { typedef typename internal::remove_all::type _MatrixTypeNested; typedef Block BlockType; public: enum { IsRowMajor = internal::traits::IsRowMajor }; EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType) /** Column or Row constructor */ inline BlockImpl(const XprType& xpr, int i) : m_matrix(xpr), m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0), m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0), m_blockRows(BlockRows==1 ? 1 : xpr.rows()), m_blockCols(BlockCols==1 ? 1 : xpr.cols()) {} /** Dynamic-size constructor */ inline BlockImpl(const XprType& xpr, int startRow, int startCol, int blockRows, int blockCols) : m_matrix(xpr), m_startRow(startRow), m_startCol(startCol), m_blockRows(blockRows), m_blockCols(blockCols) {} inline int rows() const { return m_blockRows.value(); } inline int cols() const { return m_blockCols.value(); } inline Scalar& coeffRef(int row, int col) { return m_matrix.const_cast_derived() .coeffRef(row + m_startRow.value(), col + m_startCol.value()); } inline const Scalar coeff(int row, int col) const { return m_matrix.coeff(row + m_startRow.value(), col + m_startCol.value()); } inline Scalar& coeffRef(int index) { return m_matrix.const_cast_derived() .coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0)); } inline const Scalar coeff(int index) const { return m_matrix .coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0)); } inline const _MatrixTypeNested& nestedExpression() const { return m_matrix; } class InnerIterator : public _MatrixTypeNested::InnerIterator { typedef typename _MatrixTypeNested::InnerIterator Base; const BlockType& m_block; Index m_end; public: EIGEN_STRONG_INLINE InnerIterator(const BlockType& block, Index outer) : Base(block.derived().nestedExpression(), outer + (IsRowMajor ? block.m_startRow.value() : block.m_startCol.value())), m_block(block), m_end(IsRowMajor ? 
block.m_startCol.value()+block.m_blockCols.value() : block.m_startRow.value()+block.m_blockRows.value()) { while( (Base::operator bool()) && (Base::index() < (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value())) ) Base::operator++(); } inline Index index() const { return Base::index() - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); } inline Index outer() const { return Base::outer() - (IsRowMajor ? m_block.m_startRow.value() : m_block.m_startCol.value()); } inline Index row() const { return Base::row() - m_block.m_startRow.value(); } inline Index col() const { return Base::col() - m_block.m_startCol.value(); } inline operator bool() const { return Base::operator bool() && Base::index() < m_end; } }; class ReverseInnerIterator : public _MatrixTypeNested::ReverseInnerIterator { typedef typename _MatrixTypeNested::ReverseInnerIterator Base; const BlockType& m_block; Index m_begin; public: EIGEN_STRONG_INLINE ReverseInnerIterator(const BlockType& block, Index outer) : Base(block.derived().nestedExpression(), outer + (IsRowMajor ? block.m_startRow.value() : block.m_startCol.value())), m_block(block), m_begin(IsRowMajor ? block.m_startCol.value() : block.m_startRow.value()) { while( (Base::operator bool()) && (Base::index() >= (IsRowMajor ? m_block.m_startCol.value()+block.m_blockCols.value() : m_block.m_startRow.value()+block.m_blockRows.value())) ) Base::operator--(); } inline Index index() const { return Base::index() - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); } inline Index outer() const { return Base::outer() - (IsRowMajor ? m_block.m_startRow.value() : m_block.m_startCol.value()); } inline Index row() const { return Base::row() - m_block.m_startRow.value(); } inline Index col() const { return Base::col() - m_block.m_startCol.value(); } inline operator bool() const { return Base::operator bool() && Base::index() >= m_begin; } }; protected: friend class InnerIterator; friend class ReverseInnerIterator; EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl) typename XprType::Nested m_matrix; const internal::variable_if_dynamic m_startRow; const internal::variable_if_dynamic m_startCol; const internal::variable_if_dynamic m_blockRows; const internal::variable_if_dynamic m_blockCols; }; } // end namespace Eigen #endif // EIGEN_SPARSE_BLOCK_H RcppEigen/inst/include/Eigen/src/SparseCore/SparseCwiseUnaryOp.h0000644000176200001440000001351112253717461024342 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
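// --- Illustrative usage sketch (added; not part of the Eigen sources) ---------
// The BlockImpl specialisations in SparseBlock.h above back innerVector(),
// innerVectors() and the block methods on sparse objects; whole inner vectors of
// a SparseMatrix are even assignable, which splices the new non-zeros into the
// compressed storage through the specialised operator= shown above.  A small,
// hypothetical example (guarded with #if 0, illustration only):
#if 0
#include <Eigen/SparseCore>

void sparse_block_demo(Eigen::SparseMatrix<double>& A,            // column-major
                       const Eigen::SparseVector<double>& v) {    // v.size() == A.rows() assumed
    double colSum   = A.innerVector(2).sum();          // read-only view of column 2
    double panelNnz = A.innerVectors(0, 2).nonZeros(); // two consecutive columns

    // Whole-column assignment uses the inner-panel operator= defined above,
    // moving (or reallocating) the tail of A's value/index arrays as needed.
    A.innerVector(3) = v;
    (void)colSum; (void)panelNnz;
}
#endif
// -------------------------------------------------------------------------------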
#ifndef EIGEN_SPARSE_CWISE_UNARY_OP_H #define EIGEN_SPARSE_CWISE_UNARY_OP_H namespace Eigen { template class CwiseUnaryOpImpl : public SparseMatrixBase > { public: class InnerIterator; class ReverseInnerIterator; typedef CwiseUnaryOp Derived; EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) protected: typedef typename internal::traits::_XprTypeNested _MatrixTypeNested; typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator; typedef typename _MatrixTypeNested::ReverseInnerIterator MatrixTypeReverseIterator; }; template class CwiseUnaryOpImpl::InnerIterator : public CwiseUnaryOpImpl::MatrixTypeIterator { typedef typename CwiseUnaryOpImpl::Scalar Scalar; typedef typename CwiseUnaryOpImpl::MatrixTypeIterator Base; public: EIGEN_STRONG_INLINE InnerIterator(const CwiseUnaryOpImpl& unaryOp, typename CwiseUnaryOpImpl::Index outer) : Base(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor()) {} EIGEN_STRONG_INLINE InnerIterator& operator++() { Base::operator++(); return *this; } EIGEN_STRONG_INLINE typename CwiseUnaryOpImpl::Scalar value() const { return m_functor(Base::value()); } protected: const UnaryOp m_functor; private: typename CwiseUnaryOpImpl::Scalar& valueRef(); }; template class CwiseUnaryOpImpl::ReverseInnerIterator : public CwiseUnaryOpImpl::MatrixTypeReverseIterator { typedef typename CwiseUnaryOpImpl::Scalar Scalar; typedef typename CwiseUnaryOpImpl::MatrixTypeReverseIterator Base; public: EIGEN_STRONG_INLINE ReverseInnerIterator(const CwiseUnaryOpImpl& unaryOp, typename CwiseUnaryOpImpl::Index outer) : Base(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor()) {} EIGEN_STRONG_INLINE ReverseInnerIterator& operator--() { Base::operator--(); return *this; } EIGEN_STRONG_INLINE typename CwiseUnaryOpImpl::Scalar value() const { return m_functor(Base::value()); } protected: const UnaryOp m_functor; private: typename CwiseUnaryOpImpl::Scalar& valueRef(); }; template class CwiseUnaryViewImpl : public SparseMatrixBase > { public: class InnerIterator; class ReverseInnerIterator; typedef CwiseUnaryView Derived; EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) protected: typedef typename internal::traits::_MatrixTypeNested _MatrixTypeNested; typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator; typedef typename _MatrixTypeNested::ReverseInnerIterator MatrixTypeReverseIterator; }; template class CwiseUnaryViewImpl::InnerIterator : public CwiseUnaryViewImpl::MatrixTypeIterator { typedef typename CwiseUnaryViewImpl::Scalar Scalar; typedef typename CwiseUnaryViewImpl::MatrixTypeIterator Base; public: EIGEN_STRONG_INLINE InnerIterator(const CwiseUnaryViewImpl& unaryOp, typename CwiseUnaryViewImpl::Index outer) : Base(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor()) {} EIGEN_STRONG_INLINE InnerIterator& operator++() { Base::operator++(); return *this; } EIGEN_STRONG_INLINE typename CwiseUnaryViewImpl::Scalar value() const { return m_functor(Base::value()); } EIGEN_STRONG_INLINE typename CwiseUnaryViewImpl::Scalar& valueRef() { return m_functor(Base::valueRef()); } protected: const ViewOp m_functor; }; template class CwiseUnaryViewImpl::ReverseInnerIterator : public CwiseUnaryViewImpl::MatrixTypeReverseIterator { typedef typename CwiseUnaryViewImpl::Scalar Scalar; typedef typename CwiseUnaryViewImpl::MatrixTypeReverseIterator Base; public: EIGEN_STRONG_INLINE ReverseInnerIterator(const CwiseUnaryViewImpl& unaryOp, typename CwiseUnaryViewImpl::Index outer) : 
Base(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor()) {} EIGEN_STRONG_INLINE ReverseInnerIterator& operator--() { Base::operator--(); return *this; } EIGEN_STRONG_INLINE typename CwiseUnaryViewImpl::Scalar value() const { return m_functor(Base::value()); } EIGEN_STRONG_INLINE typename CwiseUnaryViewImpl::Scalar& valueRef() { return m_functor(Base::valueRef()); } protected: const ViewOp m_functor; }; template EIGEN_STRONG_INLINE Derived& SparseMatrixBase::operator*=(const Scalar& other) { for (Index j=0; j EIGEN_STRONG_INLINE Derived& SparseMatrixBase::operator/=(const Scalar& other) { for (Index j=0; j // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSE_CWISE_BINARY_OP_H #define EIGEN_SPARSE_CWISE_BINARY_OP_H namespace Eigen { // Here we have to handle 3 cases: // 1 - sparse op dense // 2 - dense op sparse // 3 - sparse op sparse // We also need to implement a 4th iterator for: // 4 - dense op dense // Finally, we also need to distinguish between the product and other operations : // configuration returned mode // 1 - sparse op dense product sparse // generic dense // 2 - dense op sparse product sparse // generic dense // 3 - sparse op sparse product sparse // generic sparse // 4 - dense op dense product dense // generic dense namespace internal { template<> struct promote_storage_type { typedef Sparse ret; }; template<> struct promote_storage_type { typedef Sparse ret; }; template::StorageKind, typename _RhsStorageMode = typename traits::StorageKind> class sparse_cwise_binary_op_inner_iterator_selector; } // end namespace internal template class CwiseBinaryOpImpl : public SparseMatrixBase > { public: class InnerIterator; class ReverseInnerIterator; typedef CwiseBinaryOp Derived; EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) CwiseBinaryOpImpl() { typedef typename internal::traits::StorageKind LhsStorageKind; typedef typename internal::traits::StorageKind RhsStorageKind; EIGEN_STATIC_ASSERT(( (!internal::is_same::value) || ((Lhs::Flags&RowMajorBit) == (Rhs::Flags&RowMajorBit))), THE_STORAGE_ORDER_OF_BOTH_SIDES_MUST_MATCH); } }; template class CwiseBinaryOpImpl::InnerIterator : public internal::sparse_cwise_binary_op_inner_iterator_selector::InnerIterator> { public: typedef typename Lhs::Index Index; typedef internal::sparse_cwise_binary_op_inner_iterator_selector< BinaryOp,Lhs,Rhs, InnerIterator> Base; // NOTE: we have to prefix Index by "typename Lhs::" to avoid an ICE with VC11 EIGEN_STRONG_INLINE InnerIterator(const CwiseBinaryOpImpl& binOp, typename Lhs::Index outer) : Base(binOp.derived(),outer) {} }; /*************************************************************************** * Implementation of inner-iterators ***************************************************************************/ // template struct internal::func_is_conjunction { enum { ret = false }; }; // template struct internal::func_is_conjunction > { enum { ret = true }; }; // TODO generalize the internal::scalar_product_op specialization to all conjunctions if any ! 
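// --- Illustrative usage sketch (added; not part of the Eigen sources) ---------
// The iterator selectors defined below implement the cases enumerated above.  A
// small, hypothetical example of the expressions they back (guarded with #if 0,
// illustration only; all operands are assumed to have matching sizes):
#if 0
#include <Eigen/Dense>
#include <Eigen/SparseCore>

void sparse_binary_demo(const Eigen::SparseMatrix<double>& A,
                        const Eigen::SparseMatrix<double>& B,
                        const Eigen::MatrixXd& D) {
    // sparse op sparse: the generic iterator merges the two sorted index lists;
    // the product iterator only visits indices present in both operands.
    Eigen::SparseMatrix<double> sum  = A + B;
    Eigen::SparseMatrix<double> prod = A.cwiseProduct(B);

    // sparse op dense: the coefficient-wise product keeps a sparse result, so
    // only A's stored entries are visited and dense coefficients are read on demand.
    Eigen::SparseMatrix<double> mixed = A.cwiseProduct(D);
    (void)sum; (void)prod; (void)mixed;
}
#endif
// -------------------------------------------------------------------------------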
namespace internal { // sparse - sparse (generic) template class sparse_cwise_binary_op_inner_iterator_selector { typedef CwiseBinaryOp CwiseBinaryXpr; typedef typename traits::Scalar Scalar; typedef typename traits::_LhsNested _LhsNested; typedef typename traits::_RhsNested _RhsNested; typedef typename _LhsNested::InnerIterator LhsIterator; typedef typename _RhsNested::InnerIterator RhsIterator; typedef typename Lhs::Index Index; public: EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer) : m_lhsIter(xpr.lhs(),outer), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor()) { this->operator++(); } EIGEN_STRONG_INLINE Derived& operator++() { if (m_lhsIter && m_rhsIter && (m_lhsIter.index() == m_rhsIter.index())) { m_id = m_lhsIter.index(); m_value = m_functor(m_lhsIter.value(), m_rhsIter.value()); ++m_lhsIter; ++m_rhsIter; } else if (m_lhsIter && (!m_rhsIter || (m_lhsIter.index() < m_rhsIter.index()))) { m_id = m_lhsIter.index(); m_value = m_functor(m_lhsIter.value(), Scalar(0)); ++m_lhsIter; } else if (m_rhsIter && (!m_lhsIter || (m_lhsIter.index() > m_rhsIter.index()))) { m_id = m_rhsIter.index(); m_value = m_functor(Scalar(0), m_rhsIter.value()); ++m_rhsIter; } else { m_value = 0; // this is to avoid a compilation warning m_id = -1; } return *static_cast(this); } EIGEN_STRONG_INLINE Scalar value() const { return m_value; } EIGEN_STRONG_INLINE Index index() const { return m_id; } EIGEN_STRONG_INLINE Index row() const { return Lhs::IsRowMajor ? m_lhsIter.row() : index(); } EIGEN_STRONG_INLINE Index col() const { return Lhs::IsRowMajor ? index() : m_lhsIter.col(); } EIGEN_STRONG_INLINE operator bool() const { return m_id>=0; } protected: LhsIterator m_lhsIter; RhsIterator m_rhsIter; const BinaryOp& m_functor; Scalar m_value; Index m_id; }; // sparse - sparse (product) template class sparse_cwise_binary_op_inner_iterator_selector, Lhs, Rhs, Derived, Sparse, Sparse> { typedef scalar_product_op BinaryFunc; typedef CwiseBinaryOp CwiseBinaryXpr; typedef typename CwiseBinaryXpr::Scalar Scalar; typedef typename traits::_LhsNested _LhsNested; typedef typename _LhsNested::InnerIterator LhsIterator; typedef typename traits::_RhsNested _RhsNested; typedef typename _RhsNested::InnerIterator RhsIterator; typedef typename Lhs::Index Index; public: EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer) : m_lhsIter(xpr.lhs(),outer), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor()) { while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index())) { if (m_lhsIter.index() < m_rhsIter.index()) ++m_lhsIter; else ++m_rhsIter; } } EIGEN_STRONG_INLINE Derived& operator++() { ++m_lhsIter; ++m_rhsIter; while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index())) { if (m_lhsIter.index() < m_rhsIter.index()) ++m_lhsIter; else ++m_rhsIter; } return *static_cast(this); } EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_lhsIter.value(), m_rhsIter.value()); } EIGEN_STRONG_INLINE Index index() const { return m_lhsIter.index(); } EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); } EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); } EIGEN_STRONG_INLINE operator bool() const { return (m_lhsIter && m_rhsIter); } protected: LhsIterator m_lhsIter; RhsIterator m_rhsIter; const BinaryFunc& m_functor; }; // sparse - dense (product) template class sparse_cwise_binary_op_inner_iterator_selector, Lhs, Rhs, Derived, Sparse, Dense> { typedef scalar_product_op 
BinaryFunc; typedef CwiseBinaryOp CwiseBinaryXpr; typedef typename CwiseBinaryXpr::Scalar Scalar; typedef typename traits::_LhsNested _LhsNested; typedef typename traits::RhsNested RhsNested; typedef typename _LhsNested::InnerIterator LhsIterator; typedef typename Lhs::Index Index; enum { IsRowMajor = (int(Lhs::Flags)&RowMajorBit)==RowMajorBit }; public: EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer) : m_rhs(xpr.rhs()), m_lhsIter(xpr.lhs(),outer), m_functor(xpr.functor()), m_outer(outer) {} EIGEN_STRONG_INLINE Derived& operator++() { ++m_lhsIter; return *static_cast(this); } EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_lhsIter.value(), m_rhs.coeff(IsRowMajor?m_outer:m_lhsIter.index(),IsRowMajor?m_lhsIter.index():m_outer)); } EIGEN_STRONG_INLINE Index index() const { return m_lhsIter.index(); } EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); } EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); } EIGEN_STRONG_INLINE operator bool() const { return m_lhsIter; } protected: RhsNested m_rhs; LhsIterator m_lhsIter; const BinaryFunc m_functor; const Index m_outer; }; // sparse - dense (product) template class sparse_cwise_binary_op_inner_iterator_selector, Lhs, Rhs, Derived, Dense, Sparse> { typedef scalar_product_op BinaryFunc; typedef CwiseBinaryOp CwiseBinaryXpr; typedef typename CwiseBinaryXpr::Scalar Scalar; typedef typename traits::_RhsNested _RhsNested; typedef typename _RhsNested::InnerIterator RhsIterator; typedef typename Lhs::Index Index; enum { IsRowMajor = (int(Rhs::Flags)&RowMajorBit)==RowMajorBit }; public: EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer) : m_xpr(xpr), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor()), m_outer(outer) {} EIGEN_STRONG_INLINE Derived& operator++() { ++m_rhsIter; return *static_cast(this); } EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_xpr.lhs().coeff(IsRowMajor?m_outer:m_rhsIter.index(),IsRowMajor?m_rhsIter.index():m_outer), m_rhsIter.value()); } EIGEN_STRONG_INLINE Index index() const { return m_rhsIter.index(); } EIGEN_STRONG_INLINE Index row() const { return m_rhsIter.row(); } EIGEN_STRONG_INLINE Index col() const { return m_rhsIter.col(); } EIGEN_STRONG_INLINE operator bool() const { return m_rhsIter; } protected: const CwiseBinaryXpr& m_xpr; RhsIterator m_rhsIter; const BinaryFunc& m_functor; const Index m_outer; }; } // end namespace internal /*************************************************************************** * Implementation of SparseMatrixBase and SparseCwise functions/operators ***************************************************************************/ template template EIGEN_STRONG_INLINE Derived & SparseMatrixBase::operator-=(const SparseMatrixBase &other) { return derived() = derived() - other.derived(); } template template EIGEN_STRONG_INLINE Derived & SparseMatrixBase::operator+=(const SparseMatrixBase& other) { return derived() = derived() + other.derived(); } template template EIGEN_STRONG_INLINE const typename SparseMatrixBase::template CwiseProductDenseReturnType::Type SparseMatrixBase::cwiseProduct(const MatrixBase &other) const { return typename CwiseProductDenseReturnType::Type(derived(), other.derived()); } } // end namespace Eigen #endif // EIGEN_SPARSE_CWISE_BINARY_OP_H RcppEigen/inst/include/Eigen/src/SparseCore/SparseSparseProductWithPruning.h0000644000176200001440000001433312305236565026751 0ustar liggesusers// This file is part of 
Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2011 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H #define EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H namespace Eigen { namespace internal { // perform a pseudo in-place sparse * sparse product assuming all matrices are col major template static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res, const typename ResultType::RealScalar& tolerance) { // return sparse_sparse_product_with_pruning_impl2(lhs,rhs,res); typedef typename remove_all::type::Scalar Scalar; typedef typename remove_all::type::Index Index; // make sure to call innerSize/outerSize since we fake the storage order. Index rows = lhs.innerSize(); Index cols = rhs.outerSize(); //Index size = lhs.outerSize(); eigen_assert(lhs.outerSize() == rhs.innerSize()); // allocate a temporary buffer AmbiVector tempVector(rows); // estimate the number of non zero entries // given a rhs column containing Y non zeros, we assume that the respective Y columns // of the lhs differs in average of one non zeros, thus the number of non zeros for // the product of a rhs column with the lhs is X+Y where X is the average number of non zero // per column of the lhs. // Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs) Index estimated_nnz_prod = lhs.nonZeros() + rhs.nonZeros(); // mimics a resizeByInnerOuter: if(ResultType::IsRowMajor) res.resize(cols, rows); else res.resize(rows, cols); res.reserve(estimated_nnz_prod); double ratioColRes = double(estimated_nnz_prod)/double(lhs.rows()*rhs.cols()); for (Index j=0; j::Iterator it(tempVector,tolerance); it; ++it) res.insertBackByOuterInner(j,it.index()) = it.value(); } res.finalize(); } template::Flags&RowMajorBit, int RhsStorageOrder = traits::Flags&RowMajorBit, int ResStorageOrder = traits::Flags&RowMajorBit> struct sparse_sparse_product_with_pruning_selector; template struct sparse_sparse_product_with_pruning_selector { typedef typename traits::type>::Scalar Scalar; typedef typename ResultType::RealScalar RealScalar; static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance) { typename remove_all::type _res(res.rows(), res.cols()); internal::sparse_sparse_product_with_pruning_impl(lhs, rhs, _res, tolerance); res.swap(_res); } }; template struct sparse_sparse_product_with_pruning_selector { typedef typename ResultType::RealScalar RealScalar; static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance) { // we need a col-major matrix to hold the result typedef SparseMatrix SparseTemporaryType; SparseTemporaryType _res(res.rows(), res.cols()); internal::sparse_sparse_product_with_pruning_impl(lhs, rhs, _res, tolerance); res = _res; } }; template struct sparse_sparse_product_with_pruning_selector { typedef typename ResultType::RealScalar RealScalar; static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance) { // let's transpose the product to get a column x column product typename remove_all::type _res(res.rows(), res.cols()); internal::sparse_sparse_product_with_pruning_impl(rhs, lhs, _res, tolerance); res.swap(_res); } }; template struct sparse_sparse_product_with_pruning_selector { typedef typename ResultType::RealScalar RealScalar; static void run(const 
Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance) { typedef SparseMatrix ColMajorMatrixLhs; typedef SparseMatrix ColMajorMatrixRhs; ColMajorMatrixLhs colLhs(lhs); ColMajorMatrixRhs colRhs(rhs); internal::sparse_sparse_product_with_pruning_impl(colLhs, colRhs, res, tolerance); // let's transpose the product to get a column x column product // typedef SparseMatrix SparseTemporaryType; // SparseTemporaryType _res(res.cols(), res.rows()); // sparse_sparse_product_with_pruning_impl(rhs, lhs, _res); // res = _res.transpose(); } }; // NOTE the 2 others cases (col row *) must never occur since they are caught // by ProductReturnType which transforms it to (col col *) by evaluating rhs. } // end namespace internal } // end namespace Eigen #endif // EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H RcppEigen/inst/include/Eigen/src/SparseCore/SparseMatrixBase.h0000644000176200001440000004525512647570720024025 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2011 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSEMATRIXBASE_H #define EIGEN_SPARSEMATRIXBASE_H namespace Eigen { /** \ingroup SparseCore_Module * * \class SparseMatrixBase * * \brief Base class of any sparse matrices or sparse expressions * * \tparam Derived * * This class can be extended with the help of the plugin mechanism described on the page * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEMATRIXBASE_PLUGIN. */ template class SparseMatrixBase #ifndef EIGEN_PARSED_BY_DOXYGEN : public internal::special_scalar_op_base::Scalar, typename NumTraits::Scalar>::Real, EigenBase > #else : public EigenBase #endif // not EIGEN_PARSED_BY_DOXYGEN { public: typedef typename internal::traits::Scalar Scalar; typedef typename internal::packet_traits::type PacketScalar; typedef typename internal::traits::StorageKind StorageKind; typedef typename internal::traits::Index Index; typedef typename internal::add_const_on_value_type_if_arithmetic< typename internal::packet_traits::type >::type PacketReturnType; typedef SparseMatrixBase StorageBaseType; template Derived& operator=(const EigenBase &other) { other.derived().evalTo(derived()); return derived(); } enum { RowsAtCompileTime = internal::traits::RowsAtCompileTime, /**< The number of rows at compile-time. This is just a copy of the value provided * by the \a Derived type. If a value is not known at compile-time, * it is set to the \a Dynamic constant. * \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */ ColsAtCompileTime = internal::traits::ColsAtCompileTime, /**< The number of columns at compile-time. This is just a copy of the value provided * by the \a Derived type. If a value is not known at compile-time, * it is set to the \a Dynamic constant. * \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */ SizeAtCompileTime = (internal::size_at_compile_time::RowsAtCompileTime, internal::traits::ColsAtCompileTime>::ret), /**< This is equal to the number of coefficients, i.e. the number of * rows times the number of columns, or to \a Dynamic if this is not * known at compile-time. 
\sa RowsAtCompileTime, ColsAtCompileTime */ MaxRowsAtCompileTime = RowsAtCompileTime, MaxColsAtCompileTime = ColsAtCompileTime, MaxSizeAtCompileTime = (internal::size_at_compile_time::ret), IsVectorAtCompileTime = RowsAtCompileTime == 1 || ColsAtCompileTime == 1, /**< This is set to true if either the number of rows or the number of * columns is known at compile-time to be equal to 1. Indeed, in that case, * we are dealing with a column-vector (if there is only one column) or with * a row-vector (if there is only one row). */ Flags = internal::traits::Flags, /**< This stores expression \ref flags flags which may or may not be inherited by new expressions * constructed from this one. See the \ref flags "list of flags". */ CoeffReadCost = internal::traits::CoeffReadCost, /**< This is a rough measure of how expensive it is to read one coefficient from * this expression. */ IsRowMajor = Flags&RowMajorBit ? 1 : 0, InnerSizeAtCompileTime = int(IsVectorAtCompileTime) ? int(SizeAtCompileTime) : int(IsRowMajor) ? int(ColsAtCompileTime) : int(RowsAtCompileTime), #ifndef EIGEN_PARSED_BY_DOXYGEN _HasDirectAccess = (int(Flags)&DirectAccessBit) ? 1 : 0 // workaround sunCC #endif }; /** \internal the return type of MatrixBase::adjoint() */ typedef typename internal::conditional::IsComplex, CwiseUnaryOp, Eigen::Transpose >, Transpose >::type AdjointReturnType; typedef SparseMatrix PlainObject; #ifndef EIGEN_PARSED_BY_DOXYGEN /** This is the "real scalar" type; if the \a Scalar type is already real numbers * (e.g. int, float or double) then \a RealScalar is just the same as \a Scalar. If * \a Scalar is \a std::complex then RealScalar is \a T. * * \sa class NumTraits */ typedef typename NumTraits::Real RealScalar; /** \internal the return type of coeff() */ typedef typename internal::conditional<_HasDirectAccess, const Scalar&, Scalar>::type CoeffReturnType; /** \internal Represents a matrix with all coefficients equal to one another*/ typedef CwiseNullaryOp,Matrix > ConstantReturnType; /** type of the equivalent square matrix */ typedef Matrix SquareMatrixType; inline const Derived& derived() const { return *static_cast(this); } inline Derived& derived() { return *static_cast(this); } inline Derived& const_cast_derived() const { return *static_cast(const_cast(this)); } typedef internal::special_scalar_op_base > Base; using Base::operator*; #endif // not EIGEN_PARSED_BY_DOXYGEN #define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::SparseMatrixBase # include "../plugins/CommonCwiseUnaryOps.h" # include "../plugins/CommonCwiseBinaryOps.h" # include "../plugins/MatrixCwiseUnaryOps.h" # include "../plugins/MatrixCwiseBinaryOps.h" # include "../plugins/BlockMethods.h" # ifdef EIGEN_SPARSEMATRIXBASE_PLUGIN # include EIGEN_SPARSEMATRIXBASE_PLUGIN # endif # undef EIGEN_CURRENT_STORAGE_BASE_CLASS #undef EIGEN_CURRENT_STORAGE_BASE_CLASS /** \returns the number of rows. \sa cols() */ inline Index rows() const { return derived().rows(); } /** \returns the number of columns. \sa rows() */ inline Index cols() const { return derived().cols(); } /** \returns the number of coefficients, which is \a rows()*cols(). * \sa rows(), cols(). */ inline Index size() const { return rows() * cols(); } /** \returns the number of nonzero coefficients which is in practice the number * of stored coefficients. */ inline Index nonZeros() const { return derived().nonZeros(); } /** \returns true if either the number of rows or the number of columns is equal to 1. 
* In other words, this function returns * \code rows()==1 || cols()==1 \endcode * \sa rows(), cols(), IsVectorAtCompileTime. */ inline bool isVector() const { return rows()==1 || cols()==1; } /** \returns the size of the storage major dimension, * i.e., the number of columns for a columns major matrix, and the number of rows otherwise */ Index outerSize() const { return (int(Flags)&RowMajorBit) ? this->rows() : this->cols(); } /** \returns the size of the inner dimension according to the storage order, * i.e., the number of rows for a columns major matrix, and the number of cols otherwise */ Index innerSize() const { return (int(Flags)&RowMajorBit) ? this->cols() : this->rows(); } bool isRValue() const { return m_isRValue; } Derived& markAsRValue() { m_isRValue = true; return derived(); } SparseMatrixBase() : m_isRValue(false) { /* TODO check flags */ } template Derived& operator=(const ReturnByValue& other) { other.evalTo(derived()); return derived(); } template inline Derived& operator=(const SparseMatrixBase& other) { return assign(other.derived()); } inline Derived& operator=(const Derived& other) { // if (other.isRValue()) // derived().swap(other.const_cast_derived()); // else return assign(other.derived()); } protected: template inline Derived& assign(const OtherDerived& other) { const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit); const Index outerSize = (int(OtherDerived::Flags) & RowMajorBit) ? other.rows() : other.cols(); if ((!transpose) && other.isRValue()) { // eval without temporary derived().resize(other.rows(), other.cols()); derived().setZero(); derived().reserve((std::max)(this->rows(),this->cols())*2); for (Index j=0; j inline void assignGeneric(const OtherDerived& other) { //const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit); eigen_assert(( ((internal::traits::SupportedAccessPatterns&OuterRandomAccessPattern)==OuterRandomAccessPattern) || (!((Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit)))) && "the transpose operation is supposed to be handled in SparseMatrix::operator="); enum { Flip = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit) }; const Index outerSize = other.outerSize(); //typedef typename internal::conditional, Derived>::type TempType; // thanks to shallow copies, we always eval to a tempary Derived temp(other.rows(), other.cols()); temp.reserve((std::max)(this->rows(),this->cols())*2); for (Index j=0; j inline Derived& operator=(const SparseSparseProduct& product); friend std::ostream & operator << (std::ostream & s, const SparseMatrixBase& m) { typedef typename Derived::Nested Nested; typedef typename internal::remove_all::type NestedCleaned; if (Flags&RowMajorBit) { const Nested nm(m.derived()); for (Index row=0; row trans = m; s << static_cast >&>(trans); } } return s; } template Derived& operator+=(const SparseMatrixBase& other); template Derived& operator-=(const SparseMatrixBase& other); Derived& operator*=(const Scalar& other); Derived& operator/=(const Scalar& other); template struct CwiseProductDenseReturnType { typedef CwiseBinaryOp::Scalar, typename internal::traits::Scalar >::ReturnType>, const Derived, const OtherDerived > Type; }; template EIGEN_STRONG_INLINE const typename CwiseProductDenseReturnType::Type cwiseProduct(const MatrixBase &other) const; // sparse * sparse template const typename SparseSparseProductReturnType::Type operator*(const SparseMatrixBase &other) const; // sparse * diagonal template const SparseDiagonalProduct operator*(const 
DiagonalBase &other) const; // diagonal * sparse template friend const SparseDiagonalProduct operator*(const DiagonalBase &lhs, const SparseMatrixBase& rhs) { return SparseDiagonalProduct(lhs.derived(), rhs.derived()); } /** dense * sparse (return a dense object unless it is an outer product) */ template friend const typename DenseSparseProductReturnType::Type operator*(const MatrixBase& lhs, const Derived& rhs) { return typename DenseSparseProductReturnType::Type(lhs.derived(),rhs); } /** sparse * dense (returns a dense object unless it is an outer product) */ template const typename SparseDenseProductReturnType::Type operator*(const MatrixBase &other) const { return typename SparseDenseProductReturnType::Type(derived(), other.derived()); } /** \returns an expression of P H P^-1 where H is the matrix represented by \c *this */ SparseSymmetricPermutationProduct twistedBy(const PermutationMatrix& perm) const { return SparseSymmetricPermutationProduct(derived(), perm); } template Derived& operator*=(const SparseMatrixBase& other); #ifdef EIGEN2_SUPPORT // deprecated template typename internal::plain_matrix_type_column_major::type solveTriangular(const MatrixBase& other) const; // deprecated template void solveTriangularInPlace(MatrixBase& other) const; #endif // EIGEN2_SUPPORT template inline const SparseTriangularView triangularView() const; template inline const SparseSelfAdjointView selfadjointView() const; template inline SparseSelfAdjointView selfadjointView(); template Scalar dot(const MatrixBase& other) const; template Scalar dot(const SparseMatrixBase& other) const; RealScalar squaredNorm() const; RealScalar norm() const; RealScalar blueNorm() const; Transpose transpose() { return derived(); } const Transpose transpose() const { return derived(); } const AdjointReturnType adjoint() const { return transpose(); } // inner-vector typedef Block InnerVectorReturnType; typedef Block ConstInnerVectorReturnType; InnerVectorReturnType innerVector(Index outer); const ConstInnerVectorReturnType innerVector(Index outer) const; // set of inner-vectors typedef Block InnerVectorsReturnType; typedef Block ConstInnerVectorsReturnType; InnerVectorsReturnType innerVectors(Index outerStart, Index outerSize); const ConstInnerVectorsReturnType innerVectors(Index outerStart, Index outerSize) const; /** \internal use operator= */ template void evalTo(MatrixBase& dst) const { dst.setZero(); for (Index j=0; j toDense() const { return derived(); } template bool isApprox(const SparseMatrixBase& other, const RealScalar& prec = NumTraits::dummy_precision()) const { return toDense().isApprox(other.toDense(),prec); } template bool isApprox(const MatrixBase& other, const RealScalar& prec = NumTraits::dummy_precision()) const { return toDense().isApprox(other,prec); } /** \returns the matrix or vector obtained by evaluating this expression. * * Notice that in the case of a plain matrix or vector (not an expression) this function just returns * a const reference, in order to avoid a useless copy. */ inline const typename internal::eval::type eval() const { return typename internal::eval::type(derived()); } Scalar sum() const; protected: bool m_isRValue; }; } // end namespace Eigen #endif // EIGEN_SPARSEMATRIXBASE_H RcppEigen/inst/include/Eigen/src/SparseCore/SparseDiagonalProduct.h0000644000176200001440000001702712501315103025016 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. 
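// The block above completes SparseMatrixBase, the common base of every sparse
// expression (sizes, nonZeros(), innerVector(), products, toDense(), ...).
// A minimal usage sketch -- illustrative only, not part of the Eigen sources;
// it assumes the <Eigen/Sparse> header is available:
//
// \code
// #include <Eigen/Sparse>
// #include <iostream>
//
// Eigen::SparseMatrix<double> S(4, 3);    // column-major by default
// S.insert(0, 0) = 1.0;
// S.insert(2, 1) = 2.0;
// S.insert(3, 2) = 3.0;
// S.makeCompressed();
//
// std::cout << S.rows() << " x " << S.cols()      // 4 x 3
//           << ", nonZeros = " << S.nonZeros()    // 3 stored coefficients
//           << ", isVector = " << S.isVector()    // 0 (false)
//           << std::endl;
//
// Eigen::MatrixXd D = S.toDense();                // dense copy via evalTo()
// \endcode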
// // Copyright (C) 2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSE_DIAGONAL_PRODUCT_H #define EIGEN_SPARSE_DIAGONAL_PRODUCT_H namespace Eigen { // The product of a diagonal matrix with a sparse matrix can be easily // implemented using expression template. // We have two consider very different cases: // 1 - diag * row-major sparse // => each inner vector <=> scalar * sparse vector product // => so we can reuse CwiseUnaryOp::InnerIterator // 2 - diag * col-major sparse // => each inner vector <=> densevector * sparse vector cwise product // => again, we can reuse specialization of CwiseBinaryOp::InnerIterator // for that particular case // The two other cases are symmetric. namespace internal { template struct traits > { typedef typename remove_all::type _Lhs; typedef typename remove_all::type _Rhs; typedef typename _Lhs::Scalar Scalar; typedef typename promote_index_type::Index, typename traits::Index>::type Index; typedef Sparse StorageKind; typedef MatrixXpr XprKind; enum { RowsAtCompileTime = _Lhs::RowsAtCompileTime, ColsAtCompileTime = _Rhs::ColsAtCompileTime, MaxRowsAtCompileTime = _Lhs::MaxRowsAtCompileTime, MaxColsAtCompileTime = _Rhs::MaxColsAtCompileTime, SparseFlags = is_diagonal<_Lhs>::ret ? int(_Rhs::Flags) : int(_Lhs::Flags), Flags = (SparseFlags&RowMajorBit), CoeffReadCost = Dynamic }; }; enum {SDP_IsDiagonal, SDP_IsSparseRowMajor, SDP_IsSparseColMajor}; template class sparse_diagonal_product_inner_iterator_selector; } // end namespace internal template class SparseDiagonalProduct : public SparseMatrixBase >, internal::no_assignment_operator { typedef typename Lhs::Nested LhsNested; typedef typename Rhs::Nested RhsNested; typedef typename internal::remove_all::type _LhsNested; typedef typename internal::remove_all::type _RhsNested; enum { LhsMode = internal::is_diagonal<_LhsNested>::ret ? internal::SDP_IsDiagonal : (_LhsNested::Flags&RowMajorBit) ? internal::SDP_IsSparseRowMajor : internal::SDP_IsSparseColMajor, RhsMode = internal::is_diagonal<_RhsNested>::ret ? internal::SDP_IsDiagonal : (_RhsNested::Flags&RowMajorBit) ? internal::SDP_IsSparseRowMajor : internal::SDP_IsSparseColMajor }; public: EIGEN_SPARSE_PUBLIC_INTERFACE(SparseDiagonalProduct) typedef internal::sparse_diagonal_product_inner_iterator_selector <_LhsNested,_RhsNested,SparseDiagonalProduct,LhsMode,RhsMode> InnerIterator; // We do not want ReverseInnerIterator for diagonal-sparse products, // but this dummy declaration is needed to make diag * sparse * diag compile. 
class ReverseInnerIterator; EIGEN_STRONG_INLINE SparseDiagonalProduct(const Lhs& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs) { eigen_assert(lhs.cols() == rhs.rows() && "invalid sparse matrix * diagonal matrix product"); } EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); } EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); } EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; } EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; } protected: LhsNested m_lhs; RhsNested m_rhs; }; namespace internal { template class sparse_diagonal_product_inner_iterator_selector : public CwiseUnaryOp,const Rhs>::InnerIterator { typedef typename CwiseUnaryOp,const Rhs>::InnerIterator Base; typedef typename Lhs::Index Index; public: inline sparse_diagonal_product_inner_iterator_selector( const SparseDiagonalProductType& expr, Index outer) : Base(expr.rhs()*(expr.lhs().diagonal().coeff(outer)), outer) {} }; template class sparse_diagonal_product_inner_iterator_selector : public CwiseBinaryOp< scalar_product_op, const typename Rhs::ConstInnerVectorReturnType, const typename Lhs::DiagonalVectorType>::InnerIterator { typedef typename CwiseBinaryOp< scalar_product_op, const typename Rhs::ConstInnerVectorReturnType, const typename Lhs::DiagonalVectorType>::InnerIterator Base; typedef typename Lhs::Index Index; Index m_outer; public: inline sparse_diagonal_product_inner_iterator_selector( const SparseDiagonalProductType& expr, Index outer) : Base(expr.rhs().innerVector(outer) .cwiseProduct(expr.lhs().diagonal()), 0), m_outer(outer) {} inline Index outer() const { return m_outer; } inline Index col() const { return m_outer; } }; template class sparse_diagonal_product_inner_iterator_selector : public CwiseUnaryOp,const Lhs>::InnerIterator { typedef typename CwiseUnaryOp,const Lhs>::InnerIterator Base; typedef typename Lhs::Index Index; public: inline sparse_diagonal_product_inner_iterator_selector( const SparseDiagonalProductType& expr, Index outer) : Base(expr.lhs()*expr.rhs().diagonal().coeff(outer), outer) {} }; template class sparse_diagonal_product_inner_iterator_selector : public CwiseBinaryOp< scalar_product_op, const typename Lhs::ConstInnerVectorReturnType, const Transpose >::InnerIterator { typedef typename CwiseBinaryOp< scalar_product_op, const typename Lhs::ConstInnerVectorReturnType, const Transpose >::InnerIterator Base; typedef typename Lhs::Index Index; Index m_outer; public: inline sparse_diagonal_product_inner_iterator_selector( const SparseDiagonalProductType& expr, Index outer) : Base(expr.lhs().innerVector(outer) .cwiseProduct(expr.rhs().diagonal().transpose()), 0), m_outer(outer) {} inline Index outer() const { return m_outer; } inline Index row() const { return m_outer; } }; } // end namespace internal // SparseMatrixBase functions template template const SparseDiagonalProduct SparseMatrixBase::operator*(const DiagonalBase &other) const { return SparseDiagonalProduct(this->derived(), other.derived()); } } // end namespace Eigen #endif // EIGEN_SPARSE_DIAGONAL_PRODUCT_H RcppEigen/inst/include/Eigen/src/SparseCore/MappedSparseMatrix.h0000644000176200001440000001415212501315103024326 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
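// The SparseDiagonalProduct expression implemented above is what user code of the
// following kind expands to. This is only an illustrative sketch (it assumes
// <Eigen/Sparse>); both products remain sparse because each inner vector of S is
// merely rescaled:
//
// \code
// #include <Eigen/Sparse>
//
// Eigen::VectorXd d = Eigen::VectorXd::LinSpaced(3, 1.0, 3.0);  // diagonal entries
// Eigen::SparseMatrix<double> S(3, 3);
// S.insert(0, 1) = 5.0;
// S.insert(2, 0) = 7.0;
//
// Eigen::SparseMatrix<double> A = d.asDiagonal() * S;   // scales the rows of S
// Eigen::SparseMatrix<double> B = S * d.asDiagonal();   // scales the columns of S
// \endcode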
#ifndef EIGEN_MAPPED_SPARSEMATRIX_H #define EIGEN_MAPPED_SPARSEMATRIX_H namespace Eigen { /** \class MappedSparseMatrix * * \brief Sparse matrix * * \param _Scalar the scalar type, i.e. the type of the coefficients * * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme. * */ namespace internal { template struct traits > : traits > {}; } template class MappedSparseMatrix : public SparseMatrixBase > { public: EIGEN_SPARSE_PUBLIC_INTERFACE(MappedSparseMatrix) enum { IsRowMajor = Base::IsRowMajor }; protected: Index m_outerSize; Index m_innerSize; Index m_nnz; Index* m_outerIndex; Index* m_innerIndices; Scalar* m_values; public: inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; } inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; } inline Index innerSize() const { return m_innerSize; } inline Index outerSize() const { return m_outerSize; } bool isCompressed() const { return true; } //---------------------------------------- // direct access interface inline const Scalar* valuePtr() const { return m_values; } inline Scalar* valuePtr() { return m_values; } inline const Index* innerIndexPtr() const { return m_innerIndices; } inline Index* innerIndexPtr() { return m_innerIndices; } inline const Index* outerIndexPtr() const { return m_outerIndex; } inline Index* outerIndexPtr() { return m_outerIndex; } //---------------------------------------- inline Scalar coeff(Index row, Index col) const { const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? col : row; Index start = m_outerIndex[outer]; Index end = m_outerIndex[outer+1]; if (start==end) return Scalar(0); else if (end>0 && inner==m_innerIndices[end-1]) return m_values[end-1]; // ^^ optimization: let's first check if it is the last coefficient // (very common in high level algorithms) const Index* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end-1],inner); const Index id = r-&m_innerIndices[0]; return ((*r==inner) && (id=start && "you probably called coeffRef on a non finalized matrix"); eigen_assert(end>start && "coeffRef cannot be called on a zero coefficient"); Index* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end],inner); const Index id = r-&m_innerIndices[0]; eigen_assert((*r==inner) && (id class MappedSparseMatrix::InnerIterator { public: InnerIterator(const MappedSparseMatrix& mat, Index outer) : m_matrix(mat), m_outer(outer), m_id(mat.outerIndexPtr()[outer]), m_start(m_id), m_end(mat.outerIndexPtr()[outer+1]) {} inline InnerIterator& operator++() { m_id++; return *this; } inline Scalar value() const { return m_matrix.valuePtr()[m_id]; } inline Scalar& valueRef() { return const_cast(m_matrix.valuePtr()[m_id]); } inline Index index() const { return m_matrix.innerIndexPtr()[m_id]; } inline Index row() const { return IsRowMajor ? m_outer : index(); } inline Index col() const { return IsRowMajor ? 
index() : m_outer; } inline operator bool() const { return (m_id < m_end) && (m_id>=m_start); } protected: const MappedSparseMatrix& m_matrix; const Index m_outer; Index m_id; const Index m_start; const Index m_end; }; template class MappedSparseMatrix::ReverseInnerIterator { public: ReverseInnerIterator(const MappedSparseMatrix& mat, Index outer) : m_matrix(mat), m_outer(outer), m_id(mat.outerIndexPtr()[outer+1]), m_start(mat.outerIndexPtr()[outer]), m_end(m_id) {} inline ReverseInnerIterator& operator--() { m_id--; return *this; } inline Scalar value() const { return m_matrix.valuePtr()[m_id-1]; } inline Scalar& valueRef() { return const_cast(m_matrix.valuePtr()[m_id-1]); } inline Index index() const { return m_matrix.innerIndexPtr()[m_id-1]; } inline Index row() const { return IsRowMajor ? m_outer : index(); } inline Index col() const { return IsRowMajor ? index() : m_outer; } inline operator bool() const { return (m_id <= m_end) && (m_id>m_start); } protected: const MappedSparseMatrix& m_matrix; const Index m_outer; Index m_id; const Index m_start; const Index m_end; }; } // end namespace Eigen #endif // EIGEN_MAPPED_SPARSEMATRIX_H RcppEigen/inst/include/Eigen/src/SparseCore/SparseProduct.h0000644000176200001440000001534512501315103023360 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSEPRODUCT_H #define EIGEN_SPARSEPRODUCT_H namespace Eigen { template struct SparseSparseProductReturnType { typedef typename internal::traits::Scalar Scalar; typedef typename internal::traits::Index Index; enum { LhsRowMajor = internal::traits::Flags & RowMajorBit, RhsRowMajor = internal::traits::Flags & RowMajorBit, TransposeRhs = (!LhsRowMajor) && RhsRowMajor, TransposeLhs = LhsRowMajor && (!RhsRowMajor) }; typedef typename internal::conditional, typename internal::nested::type>::type LhsNested; typedef typename internal::conditional, typename internal::nested::type>::type RhsNested; typedef SparseSparseProduct Type; }; namespace internal { template struct traits > { typedef MatrixXpr XprKind; // clean the nested types: typedef typename remove_all::type _LhsNested; typedef typename remove_all::type _RhsNested; typedef typename _LhsNested::Scalar Scalar; typedef typename promote_index_type::Index, typename traits<_RhsNested>::Index>::type Index; enum { LhsCoeffReadCost = _LhsNested::CoeffReadCost, RhsCoeffReadCost = _RhsNested::CoeffReadCost, LhsFlags = _LhsNested::Flags, RhsFlags = _RhsNested::Flags, RowsAtCompileTime = _LhsNested::RowsAtCompileTime, ColsAtCompileTime = _RhsNested::ColsAtCompileTime, MaxRowsAtCompileTime = _LhsNested::MaxRowsAtCompileTime, MaxColsAtCompileTime = _RhsNested::MaxColsAtCompileTime, InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(_LhsNested::ColsAtCompileTime, _RhsNested::RowsAtCompileTime), EvalToRowMajor = (RhsFlags & LhsFlags & RowMajorBit), RemovedBits = ~(EvalToRowMajor ? 
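// A usage sketch for the MappedSparseMatrix class defined above: it wraps
// compressed storage owned elsewhere without copying it. Illustrative only; it
// assumes the usual (rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr)
// constructor of MappedSparseMatrix and the <Eigen/Sparse> header:
//
// \code
// #include <Eigen/Sparse>
//
// // Compressed column-major data owned by the caller (e.g. slots of an R dgCMatrix).
// int    outer[4] = {0, 1, 2, 3};     // column start offsets (cols + 1 entries)
// int    inner[3] = {0, 2, 1};        // row indices of the stored values
// double vals[3]  = {1.0, 2.0, 3.0};
//
// Eigen::MappedSparseMatrix<double> M(3, 3, 3, outer, inner, vals);
// double x = M.coeff(2, 1);           // reads vals[1] == 2.0; no copy is made
// \endcode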
0 : RowMajorBit), Flags = (int(LhsFlags | RhsFlags) & HereditaryBits & RemovedBits) | EvalBeforeAssigningBit | EvalBeforeNestingBit, CoeffReadCost = Dynamic }; typedef Sparse StorageKind; }; } // end namespace internal template class SparseSparseProduct : internal::no_assignment_operator, public SparseMatrixBase > { public: typedef SparseMatrixBase Base; EIGEN_DENSE_PUBLIC_INTERFACE(SparseSparseProduct) private: typedef typename internal::traits::_LhsNested _LhsNested; typedef typename internal::traits::_RhsNested _RhsNested; public: template EIGEN_STRONG_INLINE SparseSparseProduct(const Lhs& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs), m_tolerance(0), m_conservative(true) { init(); } template EIGEN_STRONG_INLINE SparseSparseProduct(const Lhs& lhs, const Rhs& rhs, const RealScalar& tolerance) : m_lhs(lhs), m_rhs(rhs), m_tolerance(tolerance), m_conservative(false) { init(); } SparseSparseProduct pruned(const Scalar& reference = 0, const RealScalar& epsilon = NumTraits::dummy_precision()) const { using std::abs; return SparseSparseProduct(m_lhs,m_rhs,abs(reference)*epsilon); } template void evalTo(Dest& result) const { if(m_conservative) internal::conservative_sparse_sparse_product_selector<_LhsNested, _RhsNested, Dest>::run(lhs(),rhs(),result); else internal::sparse_sparse_product_with_pruning_selector<_LhsNested, _RhsNested, Dest>::run(lhs(),rhs(),result,m_tolerance); } EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); } EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); } EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; } EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; } protected: void init() { eigen_assert(m_lhs.cols() == m_rhs.rows()); enum { ProductIsValid = _LhsNested::ColsAtCompileTime==Dynamic || _RhsNested::RowsAtCompileTime==Dynamic || int(_LhsNested::ColsAtCompileTime)==int(_RhsNested::RowsAtCompileTime), AreVectors = _LhsNested::IsVectorAtCompileTime && _RhsNested::IsVectorAtCompileTime, SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(_LhsNested,_RhsNested) }; // note to the lost user: // * for a dot product use: v1.dot(v2) // * for a coeff-wise product use: v1.cwise()*v2 EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes), INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS) EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors), INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION) EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT) } LhsNested m_lhs; RhsNested m_rhs; RealScalar m_tolerance; bool m_conservative; }; // sparse = sparse * sparse template template inline Derived& SparseMatrixBase::operator=(const SparseSparseProduct& product) { product.evalTo(derived()); return derived(); } /** \returns an expression of the product of two sparse matrices. * By default a conservative product preserving the symbolic non zeros is performed. * The automatic pruning of the small values can be achieved by calling the pruned() function * in which case a totally different product algorithm is employed: * \code * C = (A*B).pruned(); // supress numerical zeros (exact) * C = (A*B).pruned(ref); * C = (A*B).pruned(ref,epsilon); * \endcode * where \c ref is a meaningful non zero reference value. 
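 * For instance (an illustrative sketch, not an exact statement of the pruning rule):
 * \code
 * C = (A*B).pruned(1e3, 1e-9);
 * \endcode
 * forwards the tolerance abs(1e3)*1e-9 = 1e-6 to the pruning-based product
 * algorithm above, so coefficients of the product whose magnitude stays below
 * roughly 1e-6 are not stored in \c C.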
* */ template template inline const typename SparseSparseProductReturnType::Type SparseMatrixBase::operator*(const SparseMatrixBase &other) const { return typename SparseSparseProductReturnType::Type(derived(), other.derived()); } } // end namespace Eigen #endif // EIGEN_SPARSEPRODUCT_H RcppEigen/inst/include/Eigen/src/SparseCore/AmbiVector.h0000644000176200001440000002346312501315103022615 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_AMBIVECTOR_H #define EIGEN_AMBIVECTOR_H namespace Eigen { namespace internal { /** \internal * Hybrid sparse/dense vector class designed for intensive read-write operations. * * See BasicSparseLLT and SparseProduct for usage examples. */ template class AmbiVector { public: typedef _Scalar Scalar; typedef _Index Index; typedef typename NumTraits::Real RealScalar; AmbiVector(Index size) : m_buffer(0), m_zero(0), m_size(0), m_allocatedSize(0), m_allocatedElements(0), m_mode(-1) { resize(size); } void init(double estimatedDensity); void init(int mode); Index nonZeros() const; /** Specifies a sub-vector to work on */ void setBounds(Index start, Index end) { m_start = start; m_end = end; } void setZero(); void restart(); Scalar& coeffRef(Index i); Scalar& coeff(Index i); class Iterator; ~AmbiVector() { delete[] m_buffer; } void resize(Index size) { if (m_allocatedSize < size) reallocate(size); m_size = size; } Index size() const { return m_size; } protected: void reallocate(Index size) { // if the size of the matrix is not too large, let's allocate a bit more than needed such // that we can handle dense vector even in sparse mode. 
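    // For example (illustrative figures only): with Scalar == double and Index == int,
    // sizeof(ListEl) is typically 16 bytes, so the small-size branch below allocates
    // roughly 2*size Scalars -- enough to hold `size` linked-list elements in sparse
    // mode and, a fortiori, `size` plain Scalars in dense mode.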
delete[] m_buffer; if (size<1000) { Index allocSize = (size * sizeof(ListEl) + sizeof(Scalar) - 1)/sizeof(Scalar); m_allocatedElements = (allocSize*sizeof(Scalar))/sizeof(ListEl); m_buffer = new Scalar[allocSize]; } else { m_allocatedElements = (size*sizeof(Scalar))/sizeof(ListEl); m_buffer = new Scalar[size]; } m_size = size; m_start = 0; m_end = m_size; } void reallocateSparse() { Index copyElements = m_allocatedElements; m_allocatedElements = (std::min)(Index(m_allocatedElements*1.5),m_size); Index allocSize = m_allocatedElements * sizeof(ListEl); allocSize = (allocSize + sizeof(Scalar) - 1)/sizeof(Scalar); Scalar* newBuffer = new Scalar[allocSize]; memcpy(newBuffer, m_buffer, copyElements * sizeof(ListEl)); delete[] m_buffer; m_buffer = newBuffer; } protected: // element type of the linked list struct ListEl { Index next; Index index; Scalar value; }; // used to store data in both mode Scalar* m_buffer; Scalar m_zero; Index m_size; Index m_start; Index m_end; Index m_allocatedSize; Index m_allocatedElements; Index m_mode; // linked list mode Index m_llStart; Index m_llCurrent; Index m_llSize; }; /** \returns the number of non zeros in the current sub vector */ template _Index AmbiVector<_Scalar,_Index>::nonZeros() const { if (m_mode==IsSparse) return m_llSize; else return m_end - m_start; } template void AmbiVector<_Scalar,_Index>::init(double estimatedDensity) { if (estimatedDensity>0.1) init(IsDense); else init(IsSparse); } template void AmbiVector<_Scalar,_Index>::init(int mode) { m_mode = mode; if (m_mode==IsSparse) { m_llSize = 0; m_llStart = -1; } } /** Must be called whenever we might perform a write access * with an index smaller than the previous one. * * Don't worry, this function is extremely cheap. */ template void AmbiVector<_Scalar,_Index>::restart() { m_llCurrent = m_llStart; } /** Set all coefficients of current subvector to zero */ template void AmbiVector<_Scalar,_Index>::setZero() { if (m_mode==IsDense) { for (Index i=m_start; i _Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i) { if (m_mode==IsDense) return m_buffer[i]; else { ListEl* EIGEN_RESTRICT llElements = reinterpret_cast(m_buffer); // TODO factorize the following code to reduce code generation eigen_assert(m_mode==IsSparse); if (m_llSize==0) { // this is the first element m_llStart = 0; m_llCurrent = 0; ++m_llSize; llElements[0].value = Scalar(0); llElements[0].index = i; llElements[0].next = -1; return llElements[0].value; } else if (i=llElements[m_llCurrent].index && "you must call restart() before inserting an element with lower or equal index"); while (nextel >= 0 && llElements[nextel].index<=i) { m_llCurrent = nextel; nextel = llElements[nextel].next; } if (llElements[m_llCurrent].index==i) { // the coefficient already exists and we found it ! return llElements[m_llCurrent].value; } else { if (m_llSize>=m_allocatedElements) { reallocateSparse(); llElements = reinterpret_cast(m_buffer); } eigen_internal_assert(m_llSize _Scalar& AmbiVector<_Scalar,_Index>::coeff(_Index i) { if (m_mode==IsDense) return m_buffer[i]; else { ListEl* EIGEN_RESTRICT llElements = reinterpret_cast(m_buffer); eigen_assert(m_mode==IsSparse); if ((m_llSize==0) || (i= 0 && llElements[elid].index class AmbiVector<_Scalar,_Index>::Iterator { public: typedef _Scalar Scalar; typedef typename NumTraits::Real RealScalar; /** Default constructor * \param vec the vector on which we iterate * \param epsilon the minimal value used to prune zero coefficients. 
* In practice, all coefficients having a magnitude smaller than \a epsilon * are skipped. */ Iterator(const AmbiVector& vec, const RealScalar& epsilon = 0) : m_vector(vec) { using std::abs; m_epsilon = epsilon; m_isDense = m_vector.m_mode==IsDense; if (m_isDense) { m_currentEl = 0; // this is to avoid a compilation warning m_cachedValue = 0; // this is to avoid a compilation warning m_cachedIndex = m_vector.m_start-1; ++(*this); } else { ListEl* EIGEN_RESTRICT llElements = reinterpret_cast(m_vector.m_buffer); m_currentEl = m_vector.m_llStart; while (m_currentEl>=0 && abs(llElements[m_currentEl].value)<=m_epsilon) m_currentEl = llElements[m_currentEl].next; if (m_currentEl<0) { m_cachedValue = 0; // this is to avoid a compilation warning m_cachedIndex = -1; } else { m_cachedIndex = llElements[m_currentEl].index; m_cachedValue = llElements[m_currentEl].value; } } } Index index() const { return m_cachedIndex; } Scalar value() const { return m_cachedValue; } operator bool() const { return m_cachedIndex>=0; } Iterator& operator++() { using std::abs; if (m_isDense) { do { ++m_cachedIndex; } while (m_cachedIndex(m_vector.m_buffer); do { m_currentEl = llElements[m_currentEl].next; } while (m_currentEl>=0 && abs(llElements[m_currentEl].value) // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSEUTIL_H #define EIGEN_SPARSEUTIL_H namespace Eigen { #ifdef NDEBUG #define EIGEN_DBG_SPARSE(X) #else #define EIGEN_DBG_SPARSE(X) X #endif #define EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, Op) \ template \ EIGEN_STRONG_INLINE Derived& operator Op(const Eigen::SparseMatrixBase& other) \ { \ return Base::operator Op(other.derived()); \ } \ EIGEN_STRONG_INLINE Derived& operator Op(const Derived& other) \ { \ return Base::operator Op(other); \ } #define EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, Op) \ template \ EIGEN_STRONG_INLINE Derived& operator Op(const Other& scalar) \ { \ return Base::operator Op(scalar); \ } #define EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATORS(Derived) \ EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, =) \ EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, +=) \ EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, -=) \ EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, *=) \ EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, /=) #define _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, BaseClass) \ typedef BaseClass Base; \ typedef typename Eigen::internal::traits::Scalar Scalar; \ typedef typename Eigen::NumTraits::Real RealScalar; \ typedef typename Eigen::internal::nested::type Nested; \ typedef typename Eigen::internal::traits::StorageKind StorageKind; \ typedef typename Eigen::internal::traits::Index Index; \ enum { RowsAtCompileTime = Eigen::internal::traits::RowsAtCompileTime, \ ColsAtCompileTime = Eigen::internal::traits::ColsAtCompileTime, \ Flags = Eigen::internal::traits::Flags, \ CoeffReadCost = Eigen::internal::traits::CoeffReadCost, \ SizeAtCompileTime = Base::SizeAtCompileTime, \ IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; \ using Base::derived; \ using Base::const_cast_derived; #define EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) \ _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, Eigen::SparseMatrixBase) const int CoherentAccessPattern = 0x1; const int InnerRandomAccessPattern = 0x2 | CoherentAccessPattern; const int OuterRandomAccessPattern = 0x4 | CoherentAccessPattern; const int 
RandomAccessPattern = 0x8 | OuterRandomAccessPattern | InnerRandomAccessPattern; template class SparseMatrix; template class DynamicSparseMatrix; template class SparseVector; template class MappedSparseMatrix; template class SparseTriangularView; template class SparseSelfAdjointView; template class SparseDiagonalProduct; template class SparseView; template class SparseSparseProduct; template class SparseTimeDenseProduct; template class DenseTimeSparseProduct; template class SparseDenseOuterProduct; template struct SparseSparseProductReturnType; template::ColsAtCompileTime,internal::traits::RowsAtCompileTime)> struct DenseSparseProductReturnType; template::ColsAtCompileTime,internal::traits::RowsAtCompileTime)> struct SparseDenseProductReturnType; template class SparseSymmetricPermutationProduct; namespace internal { template struct sparse_eval; template struct eval : public sparse_eval::RowsAtCompileTime,traits::ColsAtCompileTime> {}; template struct sparse_eval { typedef typename traits::Scalar _Scalar; typedef typename traits::Index _Index; public: typedef SparseVector<_Scalar, RowMajor, _Index> type; }; template struct sparse_eval { typedef typename traits::Scalar _Scalar; typedef typename traits::Index _Index; public: typedef SparseVector<_Scalar, ColMajor, _Index> type; }; template struct sparse_eval { typedef typename traits::Scalar _Scalar; typedef typename traits::Index _Index; enum { _Options = ((traits::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor }; public: typedef SparseMatrix<_Scalar, _Options, _Index> type; }; template struct sparse_eval { typedef typename traits::Scalar _Scalar; public: typedef Matrix<_Scalar, 1, 1> type; }; template struct plain_matrix_type { typedef typename traits::Scalar _Scalar; typedef typename traits::Index _Index; enum { _Options = ((traits::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor }; public: typedef SparseMatrix<_Scalar, _Options, _Index> type; }; } // end namespace internal /** \ingroup SparseCore_Module * * \class Triplet * * \brief A small structure to hold a non zero as a triplet (i,j,value). * * \sa SparseMatrix::setFromTriplets() */ template::Index > class Triplet { public: Triplet() : m_row(0), m_col(0), m_value(0) {} Triplet(const Index& i, const Index& j, const Scalar& v = Scalar(0)) : m_row(i), m_col(j), m_value(v) {} /** \returns the row index of the element */ const Index& row() const { return m_row; } /** \returns the column index of the element */ const Index& col() const { return m_col; } /** \returns the value of the element */ const Scalar& value() const { return m_value; } protected: Index m_row, m_col; Scalar m_value; }; } // end namespace Eigen #endif // EIGEN_SPARSEUTIL_H RcppEigen/inst/include/Eigen/src/SparseCore/SparseSelfAdjointView.h0000644000176200001440000004403612501315103024774 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSE_SELFADJOINTVIEW_H #define EIGEN_SPARSE_SELFADJOINTVIEW_H namespace Eigen { /** \ingroup SparseCore_Module * \class SparseSelfAdjointView * * \brief Pseudo expression to manipulate a triangular sparse matrix as a selfadjoint matrix. 
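 *
 * A short usage sketch (illustrative only; it assumes a square SparseMatrix<double>
 * of which only one triangle has been filled):
 * \code
 * const int n = 5;
 * SparseMatrix<double> A(n, n);          // fill only the lower triangular part
 * // ... A.insert(i, j) = value; for i >= j ...
 * VectorXd x = VectorXd::Random(n), y;
 * y = A.selfadjointView<Lower>() * x;    // multiplies as if A were fully symmetric
 * \endcode
 *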
* * \param MatrixType the type of the dense matrix storing the coefficients * \param UpLo can be either \c #Lower or \c #Upper * * This class is an expression of a sefladjoint matrix from a triangular part of a matrix * with given dense storage of the coefficients. It is the return type of MatrixBase::selfadjointView() * and most of the time this is the only way that it is used. * * \sa SparseMatrixBase::selfadjointView() */ template class SparseSelfAdjointTimeDenseProduct; template class DenseTimeSparseSelfAdjointProduct; namespace internal { template struct traits > : traits { }; template void permute_symm_to_symm(const MatrixType& mat, SparseMatrix& _dest, const typename MatrixType::Index* perm = 0); template void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix& _dest, const typename MatrixType::Index* perm = 0); } template class SparseSelfAdjointView : public EigenBase > { public: typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Index Index; typedef Matrix VectorI; typedef typename MatrixType::Nested MatrixTypeNested; typedef typename internal::remove_all::type _MatrixTypeNested; inline SparseSelfAdjointView(const MatrixType& matrix) : m_matrix(matrix) { eigen_assert(rows()==cols() && "SelfAdjointView is only for squared matrices"); } inline Index rows() const { return m_matrix.rows(); } inline Index cols() const { return m_matrix.cols(); } /** \internal \returns a reference to the nested matrix */ const _MatrixTypeNested& matrix() const { return m_matrix; } _MatrixTypeNested& matrix() { return m_matrix.const_cast_derived(); } /** \returns an expression of the matrix product between a sparse self-adjoint matrix \c *this and a sparse matrix \a rhs. * * Note that there is no algorithmic advantage of performing such a product compared to a general sparse-sparse matrix product. * Indeed, the SparseSelfadjointView operand is first copied into a temporary SparseMatrix before computing the product. */ template SparseSparseProduct operator*(const SparseMatrixBase& rhs) const { return SparseSparseProduct(*this, rhs.derived()); } /** \returns an expression of the matrix product between a sparse matrix \a lhs and a sparse self-adjoint matrix \a rhs. * * Note that there is no algorithmic advantage of performing such a product compared to a general sparse-sparse matrix product. * Indeed, the SparseSelfadjointView operand is first copied into a temporary SparseMatrix before computing the product. */ template friend SparseSparseProduct operator*(const SparseMatrixBase& lhs, const SparseSelfAdjointView& rhs) { return SparseSparseProduct(lhs.derived(), rhs); } /** Efficient sparse self-adjoint matrix times dense vector/matrix product */ template SparseSelfAdjointTimeDenseProduct operator*(const MatrixBase& rhs) const { return SparseSelfAdjointTimeDenseProduct(m_matrix, rhs.derived()); } /** Efficient dense vector/matrix times sparse self-adjoint matrix product */ template friend DenseTimeSparseSelfAdjointProduct operator*(const MatrixBase& lhs, const SparseSelfAdjointView& rhs) { return DenseTimeSparseSelfAdjointProduct(lhs.derived(), rhs.m_matrix); } /** Perform a symmetric rank K update of the selfadjoint matrix \c *this: * \f$ this = this + \alpha ( u u^* ) \f$ where \a u is a vector or matrix. * * \returns a reference to \c *this * * To perform \f$ this = this + \alpha ( u^* u ) \f$ you can simply * call this function with u.adjoint(). 
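 *
 * A small sketch (illustrative only):
 * \code
 * const int n = 100, k = 4;
 * SparseMatrix<double> u(n, k);                   // sparse n x k factor
 * // ... fill u ...
 * SparseMatrix<double> C(n, n);                   // stores the Lower triangle of C
 * C.selfadjointView<Lower>().rankUpdate(u, 0.5);  // C := C + 0.5 * u * u'
 * \endcode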
*/ template SparseSelfAdjointView& rankUpdate(const SparseMatrixBase& u, const Scalar& alpha = Scalar(1)); /** \internal triggered by sparse_matrix = SparseSelfadjointView; */ template void evalTo(SparseMatrix& _dest) const { internal::permute_symm_to_fullsymm(m_matrix, _dest); } template void evalTo(DynamicSparseMatrix& _dest) const { // TODO directly evaluate into _dest; SparseMatrix tmp(_dest.rows(),_dest.cols()); internal::permute_symm_to_fullsymm(m_matrix, tmp); _dest = tmp; } /** \returns an expression of P H P^-1 */ SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo> twistedBy(const PermutationMatrix& perm) const { return SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo>(m_matrix, perm); } template SparseSelfAdjointView& operator=(const SparseSymmetricPermutationProduct& permutedMatrix) { permutedMatrix.evalTo(*this); return *this; } SparseSelfAdjointView& operator=(const SparseSelfAdjointView& src) { PermutationMatrix pnull; return *this = src.twistedBy(pnull); } template SparseSelfAdjointView& operator=(const SparseSelfAdjointView& src) { PermutationMatrix pnull; return *this = src.twistedBy(pnull); } // const SparseLLT llt() const; // const SparseLDLT ldlt() const; protected: typename MatrixType::Nested m_matrix; mutable VectorI m_countPerRow; mutable VectorI m_countPerCol; }; /*************************************************************************** * Implementation of SparseMatrixBase methods ***************************************************************************/ template template const SparseSelfAdjointView SparseMatrixBase::selfadjointView() const { return derived(); } template template SparseSelfAdjointView SparseMatrixBase::selfadjointView() { return derived(); } /*************************************************************************** * Implementation of SparseSelfAdjointView methods ***************************************************************************/ template template SparseSelfAdjointView& SparseSelfAdjointView::rankUpdate(const SparseMatrixBase& u, const Scalar& alpha) { SparseMatrix tmp = u * u.adjoint(); if(alpha==Scalar(0)) m_matrix.const_cast_derived() = tmp.template triangularView(); else m_matrix.const_cast_derived() += alpha * tmp.template triangularView(); return *this; } /*************************************************************************** * Implementation of sparse self-adjoint time dense matrix ***************************************************************************/ namespace internal { template struct traits > : traits, Lhs, Rhs> > { typedef Dense StorageKind; }; } template class SparseSelfAdjointTimeDenseProduct : public ProductBase, Lhs, Rhs> { public: EIGEN_PRODUCT_PUBLIC_INTERFACE(SparseSelfAdjointTimeDenseProduct) SparseSelfAdjointTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {} template void scaleAndAddTo(Dest& dest, const Scalar& alpha) const { EIGEN_ONLY_USED_FOR_DEBUG(alpha); // TODO use alpha eigen_assert(alpha==Scalar(1) && "alpha != 1 is not implemented yet, sorry"); typedef typename internal::remove_all::type _Lhs; typedef typename _Lhs::InnerIterator LhsInnerIterator; enum { LhsIsRowMajor = (_Lhs::Flags&RowMajorBit)==RowMajorBit, ProcessFirstHalf = ((UpLo&(Upper|Lower))==(Upper|Lower)) || ( (UpLo&Upper) && !LhsIsRowMajor) || ( (UpLo&Lower) && LhsIsRowMajor), ProcessSecondHalf = !ProcessFirstHalf }; for (Index j=0; j struct traits > : traits, Lhs, Rhs> > {}; } template class DenseTimeSparseSelfAdjointProduct : public ProductBase, Lhs, Rhs> { public: 
EIGEN_PRODUCT_PUBLIC_INTERFACE(DenseTimeSparseSelfAdjointProduct) DenseTimeSparseSelfAdjointProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {} template void scaleAndAddTo(Dest& /*dest*/, const Scalar& /*alpha*/) const { // TODO } private: DenseTimeSparseSelfAdjointProduct& operator=(const DenseTimeSparseSelfAdjointProduct&); }; /*************************************************************************** * Implementation of symmetric copies and permutations ***************************************************************************/ namespace internal { template struct traits > : traits { }; template void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix& _dest, const typename MatrixType::Index* perm) { typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; typedef SparseMatrix Dest; typedef Matrix VectorI; Dest& dest(_dest.derived()); enum { StorageOrderMatch = int(Dest::IsRowMajor) == int(MatrixType::IsRowMajor) }; Index size = mat.rows(); VectorI count; count.resize(size); count.setZero(); dest.resize(size,size); for(Index j = 0; jc) || ( UpLo==Upper && rc) || ( (UpLo&Upper)==Upper && r void permute_symm_to_symm(const MatrixType& mat, SparseMatrix& _dest, const typename MatrixType::Index* perm) { typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; SparseMatrix& dest(_dest.derived()); typedef Matrix VectorI; enum { SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor, StorageOrderMatch = int(SrcOrder) == int(DstOrder), DstUpLo = DstOrder==RowMajor ? (_DstUpLo==Upper ? Lower : Upper) : _DstUpLo, SrcUpLo = SrcOrder==RowMajor ? (_SrcUpLo==Upper ? Lower : Upper) : _SrcUpLo }; Index size = mat.rows(); VectorI count(size); count.setZero(); dest.resize(size,size); for(Index j = 0; jj)) continue; Index ip = perm ? perm[i] : i; count[int(DstUpLo)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++; } } dest.outerIndexPtr()[0] = 0; for(Index j=0; jj)) continue; Index jp = perm ? perm[j] : j; Index ip = perm? perm[i] : i; Index k = count[int(DstUpLo)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++; dest.innerIndexPtr()[k] = int(DstUpLo)==int(Lower) ? 
(std::max)(ip,jp) : (std::min)(ip,jp); if(!StorageOrderMatch) std::swap(ip,jp); if( ((int(DstUpLo)==int(Lower) && ipjp))) dest.valuePtr()[k] = numext::conj(it.value()); else dest.valuePtr()[k] = it.value(); } } } } template class SparseSymmetricPermutationProduct : public EigenBase > { public: typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Index Index; protected: typedef PermutationMatrix Perm; public: typedef Matrix VectorI; typedef typename MatrixType::Nested MatrixTypeNested; typedef typename internal::remove_all::type _MatrixTypeNested; SparseSymmetricPermutationProduct(const MatrixType& mat, const Perm& perm) : m_matrix(mat), m_perm(perm) {} inline Index rows() const { return m_matrix.rows(); } inline Index cols() const { return m_matrix.cols(); } template void evalTo(SparseMatrix& _dest) const { // internal::permute_symm_to_fullsymm(m_matrix,_dest,m_perm.indices().data()); SparseMatrix tmp; internal::permute_symm_to_fullsymm(m_matrix,tmp,m_perm.indices().data()); _dest = tmp; } template void evalTo(SparseSelfAdjointView& dest) const { internal::permute_symm_to_symm(m_matrix,dest.matrix(),m_perm.indices().data()); } protected: MatrixTypeNested m_matrix; const Perm& m_perm; }; } // end namespace Eigen #endif // EIGEN_SPARSE_SELFADJOINTVIEW_H RcppEigen/inst/include/Eigen/src/SparseCore/SparseVector.h0000644000176200001440000003251212647570720023220 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSEVECTOR_H #define EIGEN_SPARSEVECTOR_H namespace Eigen { /** \ingroup SparseCore_Module * \class SparseVector * * \brief a sparse vector class * * \tparam _Scalar the scalar type, i.e. the type of the coefficients * * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme. * * This class can be extended with the help of the plugin mechanism described on the page * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEVECTOR_PLUGIN. */ namespace internal { template struct traits > { typedef _Scalar Scalar; typedef _Index Index; typedef Sparse StorageKind; typedef MatrixXpr XprKind; enum { IsColVector = (_Options & RowMajorBit) ? 0 : 1, RowsAtCompileTime = IsColVector ? Dynamic : 1, ColsAtCompileTime = IsColVector ? 1 : Dynamic, MaxRowsAtCompileTime = RowsAtCompileTime, MaxColsAtCompileTime = ColsAtCompileTime, Flags = _Options | NestByRefBit | LvalueBit | (IsColVector ? 0 : RowMajorBit), CoeffReadCost = NumTraits::ReadCost, SupportedAccessPatterns = InnerRandomAccessPattern }; }; // Sparse-Vector-Assignment kinds: enum { SVA_RuntimeSwitch, SVA_Inner, SVA_Outer }; template< typename Dest, typename Src, int AssignmentKind = !bool(Src::IsVectorAtCompileTime) ? SVA_RuntimeSwitch : Src::InnerSizeAtCompileTime==1 ? 
SVA_Outer : SVA_Inner> struct sparse_vector_assign_selector; } template class SparseVector : public SparseMatrixBase > { typedef SparseMatrixBase SparseBase; public: EIGEN_SPARSE_PUBLIC_INTERFACE(SparseVector) EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, +=) EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, -=) typedef internal::CompressedStorage Storage; enum { IsColVector = internal::traits::IsColVector }; enum { Options = _Options }; EIGEN_STRONG_INLINE Index rows() const { return IsColVector ? m_size : 1; } EIGEN_STRONG_INLINE Index cols() const { return IsColVector ? 1 : m_size; } EIGEN_STRONG_INLINE Index innerSize() const { return m_size; } EIGEN_STRONG_INLINE Index outerSize() const { return 1; } EIGEN_STRONG_INLINE const Scalar* valuePtr() const { return &m_data.value(0); } EIGEN_STRONG_INLINE Scalar* valuePtr() { return &m_data.value(0); } EIGEN_STRONG_INLINE const Index* innerIndexPtr() const { return &m_data.index(0); } EIGEN_STRONG_INLINE Index* innerIndexPtr() { return &m_data.index(0); } /** \internal */ inline Storage& data() { return m_data; } /** \internal */ inline const Storage& data() const { return m_data; } inline Scalar coeff(Index row, Index col) const { eigen_assert(IsColVector ? (col==0 && row>=0 && row=0 && col=0 && i=0 && row=0 && col=0 && i(m_data.size()); } inline void startVec(Index outer) { EIGEN_UNUSED_VARIABLE(outer); eigen_assert(outer==0); } inline Scalar& insertBackByOuterInner(Index outer, Index inner) { EIGEN_UNUSED_VARIABLE(outer); eigen_assert(outer==0); return insertBack(inner); } inline Scalar& insertBack(Index i) { m_data.append(0, i); return m_data.value(m_data.size()-1); } inline Scalar& insert(Index row, Index col) { eigen_assert(IsColVector ? (col==0 && row>=0 && row=0 && col=0 && i= startId) && (m_data.index(p) > i) ) { m_data.index(p+1) = m_data.index(p); m_data.value(p+1) = m_data.value(p); --p; } m_data.index(p+1) = i; m_data.value(p+1) = 0; return m_data.value(p+1); } /** */ inline void reserve(Index reserveSize) { m_data.reserve(reserveSize); } inline void finalize() {} void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits::dummy_precision()) { m_data.prune(reference,epsilon); } void resize(Index rows, Index cols) { eigen_assert(rows==1 || cols==1); resize(IsColVector ? rows : cols); } void resize(Index newSize) { m_size = newSize; m_data.clear(); } void resizeNonZeros(Index size) { m_data.resize(size); } inline SparseVector() : m_size(0) { check_template_parameters(); resize(0); } inline SparseVector(Index size) : m_size(0) { check_template_parameters(); resize(size); } inline SparseVector(Index rows, Index cols) : m_size(0) { check_template_parameters(); resize(rows,cols); } template inline SparseVector(const SparseMatrixBase& other) : m_size(0) { check_template_parameters(); *this = other.derived(); } inline SparseVector(const SparseVector& other) : SparseBase(other), m_size(0) { check_template_parameters(); *this = other.derived(); } /** Swaps the values of \c *this and \a other. * Overloaded for performance: this version performs a \em shallow swap by swaping pointers and attributes only. 
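 * For example (illustrative sketch only):
 * \code
 * SparseVector<double> a(1000), b(1000);
 * a.insert(3) = 1.0;
 * b.insert(7) = 2.0;
 * a.swap(b);   // O(1): only the internal buffers and sizes are exchanged
 * \endcode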
* \sa SparseMatrixBase::swap() */ inline void swap(SparseVector& other) { std::swap(m_size, other.m_size); m_data.swap(other.m_data); } inline SparseVector& operator=(const SparseVector& other) { if (other.isRValue()) { swap(other.const_cast_derived()); } else { resize(other.size()); m_data = other.m_data; } return *this; } template inline SparseVector& operator=(const SparseMatrixBase& other) { SparseVector tmp(other.size()); internal::sparse_vector_assign_selector::run(tmp,other.derived()); this->swap(tmp); return *this; } #ifndef EIGEN_PARSED_BY_DOXYGEN template inline SparseVector& operator=(const SparseSparseProduct& product) { return Base::operator=(product); } #endif friend std::ostream & operator << (std::ostream & s, const SparseVector& m) { for (Index i=0; i::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE); EIGEN_STATIC_ASSERT((_Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS); } Storage m_data; Index m_size; }; template class SparseVector::InnerIterator { public: InnerIterator(const SparseVector& vec, Index outer=0) : m_data(vec.m_data), m_id(0), m_end(static_cast(m_data.size())) { EIGEN_UNUSED_VARIABLE(outer); eigen_assert(outer==0); } InnerIterator(const internal::CompressedStorage& data) : m_data(data), m_id(0), m_end(static_cast(m_data.size())) {} inline InnerIterator& operator++() { m_id++; return *this; } inline Scalar value() const { return m_data.value(m_id); } inline Scalar& valueRef() { return const_cast(m_data.value(m_id)); } inline Index index() const { return m_data.index(m_id); } inline Index row() const { return IsColVector ? index() : 0; } inline Index col() const { return IsColVector ? 0 : index(); } inline operator bool() const { return (m_id < m_end); } protected: const internal::CompressedStorage& m_data; Index m_id; const Index m_end; }; template class SparseVector::ReverseInnerIterator { public: ReverseInnerIterator(const SparseVector& vec, Index outer=0) : m_data(vec.m_data), m_id(static_cast(m_data.size())), m_start(0) { EIGEN_UNUSED_VARIABLE(outer); eigen_assert(outer==0); } ReverseInnerIterator(const internal::CompressedStorage& data) : m_data(data), m_id(static_cast(m_data.size())), m_start(0) {} inline ReverseInnerIterator& operator--() { m_id--; return *this; } inline Scalar value() const { return m_data.value(m_id-1); } inline Scalar& valueRef() { return const_cast(m_data.value(m_id-1)); } inline Index index() const { return m_data.index(m_id-1); } inline Index row() const { return IsColVector ? index() : 0; } inline Index col() const { return IsColVector ? 
0 : index(); } inline operator bool() const { return (m_id > m_start); } protected: const internal::CompressedStorage& m_data; Index m_id; const Index m_start; }; namespace internal { template< typename Dest, typename Src> struct sparse_vector_assign_selector { static void run(Dest& dst, const Src& src) { eigen_internal_assert(src.innerSize()==src.size()); for(typename Src::InnerIterator it(src, 0); it; ++it) dst.insert(it.index()) = it.value(); } }; template< typename Dest, typename Src> struct sparse_vector_assign_selector { static void run(Dest& dst, const Src& src) { eigen_internal_assert(src.outerSize()==src.size()); for(typename Dest::Index i=0; i struct sparse_vector_assign_selector { static void run(Dest& dst, const Src& src) { if(src.outerSize()==1) sparse_vector_assign_selector::run(dst, src); else sparse_vector_assign_selector::run(dst, src); } }; } } // end namespace Eigen #endif // EIGEN_SPARSEVECTOR_H RcppEigen/inst/include/Eigen/src/SparseCore/SparseDot.h0000644000176200001440000000613412501315103022462 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSE_DOT_H #define EIGEN_SPARSE_DOT_H namespace Eigen { template template typename internal::traits::Scalar SparseMatrixBase::dot(const MatrixBase& other) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived) EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) eigen_assert(size() == other.size()); eigen_assert(other.size()>0 && "you are using a non initialized vector"); typename Derived::InnerIterator i(derived(),0); Scalar res(0); while (i) { res += numext::conj(i.value()) * other.coeff(i.index()); ++i; } return res; } template template typename internal::traits::Scalar SparseMatrixBase::dot(const SparseMatrixBase& other) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived) EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) eigen_assert(size() == other.size()); typedef typename Derived::Nested Nested; typedef typename OtherDerived::Nested OtherNested; typedef typename internal::remove_all::type NestedCleaned; typedef typename internal::remove_all::type OtherNestedCleaned; Nested nthis(derived()); OtherNested nother(other.derived()); typename NestedCleaned::InnerIterator i(nthis,0); typename OtherNestedCleaned::InnerIterator j(nother,0); Scalar res(0); while (i && j) { if (i.index()==j.index()) { res += numext::conj(i.value()) * j.value(); ++i; ++j; } else if (i.index() inline typename NumTraits::Scalar>::Real SparseMatrixBase::squaredNorm() const { return numext::real((*this).cwiseAbs2().sum()); } template inline typename NumTraits::Scalar>::Real SparseMatrixBase::norm() const { using std::sqrt; return sqrt(squaredNorm()); } template inline typename NumTraits::Scalar>::Real SparseMatrixBase::blueNorm() const { return internal::blueNorm_impl(*this); } } // end namespace Eigen #endif // 
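// A short sketch of the sparse dot/norm interface implemented above
// (illustrative only; it assumes the <Eigen/Sparse> header):
//
// \code
// Eigen::SparseVector<double> u(4), v(4);
// u.insert(0) = 3.0;  u.insert(2) = 4.0;
// v.insert(2) = 2.0;
//
// double d  = u.dot(v);          // 8.0 : only the common index 2 contributes
// double n2 = u.squaredNorm();   // 25.0
// double nn = u.norm();          // 5.0
// \endcode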
EIGEN_SPARSE_DOT_H RcppEigen/inst/include/Eigen/src/SparseCore/SparseColEtree.h0000644000176200001440000001403512305236565023455 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Désiré Nuentsa-Wakam // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. /* * NOTE: This file is the modified version of sp_coletree.c file in SuperLU * -- SuperLU routine (version 3.1) -- * Univ. of California Berkeley, Xerox Palo Alto Research Center, * and Lawrence Berkeley National Lab. * August 1, 2008 * * Copyright (c) 1994 by Xerox Corporation. All rights reserved. * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY * EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program for any * purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is * granted, provided the above notices are retained, and a notice that * the code was modified is included with the above copyright notice. */ #ifndef SPARSE_COLETREE_H #define SPARSE_COLETREE_H namespace Eigen { namespace internal { /** Find the root of the tree/set containing the vertex i : Use Path halving */ template Index etree_find (Index i, IndexVector& pp) { Index p = pp(i); // Parent Index gp = pp(p); // Grand parent while (gp != p) { pp(i) = gp; // Parent pointer on find path is changed to former grand parent i = gp; p = pp(i); gp = pp(p); } return p; } /** Compute the column elimination tree of a sparse matrix * \param mat The matrix in column-major format. * \param parent The elimination tree * \param firstRowElt The column index of the first element in each row * \param perm The permutation to apply to the column of \b mat */ template int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowElt, typename MatrixType::Index *perm=0) { typedef typename MatrixType::Index Index; Index nc = mat.cols(); // Number of columns Index m = mat.rows(); Index diagSize = (std::min)(nc,m); IndexVector root(nc); // root of subtree of etree root.setZero(); IndexVector pp(nc); // disjoint sets pp.setZero(); // Initialize disjoint sets parent.resize(mat.cols()); //Compute first nonzero column in each row Index row,col; firstRowElt.resize(m); firstRowElt.setConstant(nc); firstRowElt.segment(0, diagSize).setLinSpaced(diagSize, 0, diagSize-1); bool found_diag; for (col = 0; col < nc; col++) { Index pcol = col; if(perm) pcol = perm[col]; for (typename MatrixType::InnerIterator it(mat, pcol); it; ++it) { row = it.row(); firstRowElt(row) = (std::min)(firstRowElt(row), col); } } /* Compute etree by Liu's algorithm for symmetric matrices, except use (firstRowElt[r],c) in place of an edge (r,c) of A. Thus each row clique in A'*A is replaced by a star centered at its first vertex, which has the same fill. 
*/ Index rset, cset, rroot; for (col = 0; col < nc; col++) { found_diag = col>=m; pp(col) = col; cset = col; root(cset) = col; parent(col) = nc; /* The diagonal element is treated here even if it does not exist in the matrix * hence the loop is executed once more */ Index pcol = col; if(perm) pcol = perm[col]; for (typename MatrixType::InnerIterator it(mat, pcol); it||!found_diag; ++it) { // A sequence of interleaved find and union is performed Index i = col; if(it) i = it.index(); if (i == col) found_diag = true; row = firstRowElt(i); if (row >= col) continue; rset = internal::etree_find(row, pp); // Find the name of the set containing row rroot = root(rset); if (rroot != col) { parent(rroot) = col; pp(cset) = rset; cset = rset; root(cset) = col; } } } return 0; } /** * Depth-first search from vertex n. No recursion. * This routine was contributed by Cédric Doucet, CEDRAT Group, Meylan, France. */ template void nr_etdfs (Index n, IndexVector& parent, IndexVector& first_kid, IndexVector& next_kid, IndexVector& post, Index postnum) { Index current = n, first, next; while (postnum != n) { // No kid for the current node first = first_kid(current); // no kid for the current node if (first == -1) { // Numbering this node because it has no kid post(current) = postnum++; // looking for the next kid next = next_kid(current); while (next == -1) { // No more kids : back to the parent node current = parent(current); // numbering the parent node post(current) = postnum++; // Get the next kid next = next_kid(current); } // stopping criterion if (postnum == n+1) return; // Updating current node current = next; } else { current = first; } } } /** * \brief Post order a tree * \param n the number of nodes * \param parent Input tree * \param post postordered tree */ template void treePostorder(Index n, IndexVector& parent, IndexVector& post) { IndexVector first_kid, next_kid; // Linked list of children Index postnum; // Allocate storage for working arrays and results first_kid.resize(n+1); next_kid.setZero(n+1); post.setZero(n+1); // Set up structure describing children Index v, dad; first_kid.setConstant(-1); for (v = n-1; v >= 0; v--) { dad = parent(v); next_kid(v) = first_kid(dad); first_kid(dad) = v; } // Depth-first search from dummy root vertex #n postnum = 0; internal::nr_etdfs(n, parent, first_kid, next_kid, post, postnum); } } // end namespace internal } // end namespace Eigen #endif // SPARSE_COLETREE_H RcppEigen/inst/include/Eigen/src/SparseCore/SparseTranspose.h0000644000176200001440000000467512501315103023722 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSETRANSPOSE_H #define EIGEN_SPARSETRANSPOSE_H namespace Eigen { template class TransposeImpl : public SparseMatrixBase > { typedef typename internal::remove_all::type _MatrixTypeNested; public: EIGEN_SPARSE_PUBLIC_INTERFACE(Transpose ) class InnerIterator; class ReverseInnerIterator; inline Index nonZeros() const { return derived().nestedExpression().nonZeros(); } }; // NOTE: VC10 and VC11 trigger an ICE if don't put typename TransposeImpl:: in front of Index, // a typedef typename TransposeImpl::Index Index; // does not fix the issue. // An alternative is to define the nested class in the parent class itself. 
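// The iterator classes defined below are what allow transposed sparse expressions to
// be traversed and assigned. A minimal sketch (illustrative only; it assumes
// <Eigen/Sparse>):
//
// \code
// Eigen::SparseMatrix<double> S(3, 4);
// S.insert(1, 2) = 5.0;
// S.makeCompressed();
//
// // The (1,2) entry of S becomes the (2,1) entry of T; row()/col() are swapped
// // while iterating the transposed expression.
// Eigen::SparseMatrix<double> T = S.transpose();
// \endcode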
template class TransposeImpl::InnerIterator : public _MatrixTypeNested::InnerIterator { typedef typename _MatrixTypeNested::InnerIterator Base; typedef typename TransposeImpl::Index Index; public: EIGEN_STRONG_INLINE InnerIterator(const TransposeImpl& trans, typename TransposeImpl::Index outer) : Base(trans.derived().nestedExpression(), outer) {} typename TransposeImpl::Index row() const { return Base::col(); } typename TransposeImpl::Index col() const { return Base::row(); } }; template class TransposeImpl::ReverseInnerIterator : public _MatrixTypeNested::ReverseInnerIterator { typedef typename _MatrixTypeNested::ReverseInnerIterator Base; typedef typename TransposeImpl::Index Index; public: EIGEN_STRONG_INLINE ReverseInnerIterator(const TransposeImpl& xpr, typename TransposeImpl::Index outer) : Base(xpr.derived().nestedExpression(), outer) {} typename TransposeImpl::Index row() const { return Base::col(); } typename TransposeImpl::Index col() const { return Base::row(); } }; } // end namespace Eigen #endif // EIGEN_SPARSETRANSPOSE_H RcppEigen/inst/include/Eigen/src/SparseCore/SparsePermutation.h0000644000176200001440000001426712446006401024257 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSE_PERMUTATION_H #define EIGEN_SPARSE_PERMUTATION_H // This file implements sparse * permutation products namespace Eigen { namespace internal { template struct traits > { typedef typename remove_all::type MatrixTypeNestedCleaned; typedef typename MatrixTypeNestedCleaned::Scalar Scalar; typedef typename MatrixTypeNestedCleaned::Index Index; enum { SrcStorageOrder = MatrixTypeNestedCleaned::Flags&RowMajorBit ? RowMajor : ColMajor, MoveOuter = SrcStorageOrder==RowMajor ? Side==OnTheLeft : Side==OnTheRight }; typedef typename internal::conditional, SparseMatrix >::type ReturnType; }; template struct permut_sparsematrix_product_retval : public ReturnByValue > { typedef typename remove_all::type MatrixTypeNestedCleaned; typedef typename MatrixTypeNestedCleaned::Scalar Scalar; typedef typename MatrixTypeNestedCleaned::Index Index; enum { SrcStorageOrder = MatrixTypeNestedCleaned::Flags&RowMajorBit ? RowMajor : ColMajor, MoveOuter = SrcStorageOrder==RowMajor ? 
Side==OnTheLeft : Side==OnTheRight }; permut_sparsematrix_product_retval(const PermutationType& perm, const MatrixType& matrix) : m_permutation(perm), m_matrix(matrix) {} inline int rows() const { return m_matrix.rows(); } inline int cols() const { return m_matrix.cols(); } template inline void evalTo(Dest& dst) const { if(MoveOuter) { SparseMatrix tmp(m_matrix.rows(), m_matrix.cols()); Matrix sizes(m_matrix.outerSize()); for(Index j=0; j tmp(m_matrix.rows(), m_matrix.cols()); Matrix sizes(tmp.outerSize()); sizes.setZero(); PermutationMatrix perm; if((Side==OnTheLeft) ^ Transposed) perm = m_permutation; else perm = m_permutation.transpose(); for(Index j=0; j inline const internal::permut_sparsematrix_product_retval, SparseDerived, OnTheRight, false> operator*(const SparseMatrixBase& matrix, const PermutationBase& perm) { return internal::permut_sparsematrix_product_retval, SparseDerived, OnTheRight, false>(perm, matrix.derived()); } /** \returns the matrix with the permutation applied to the rows */ template inline const internal::permut_sparsematrix_product_retval, SparseDerived, OnTheLeft, false> operator*( const PermutationBase& perm, const SparseMatrixBase& matrix) { return internal::permut_sparsematrix_product_retval, SparseDerived, OnTheLeft, false>(perm, matrix.derived()); } /** \returns the matrix with the inverse permutation applied to the columns. */ template inline const internal::permut_sparsematrix_product_retval, SparseDerived, OnTheRight, true> operator*(const SparseMatrixBase& matrix, const Transpose >& tperm) { return internal::permut_sparsematrix_product_retval, SparseDerived, OnTheRight, true>(tperm.nestedPermutation(), matrix.derived()); } /** \returns the matrix with the inverse permutation applied to the rows. */ template inline const internal::permut_sparsematrix_product_retval, SparseDerived, OnTheLeft, true> operator*(const Transpose >& tperm, const SparseMatrixBase& matrix) { return internal::permut_sparsematrix_product_retval, SparseDerived, OnTheLeft, true>(tperm.nestedPermutation(), matrix.derived()); } } // end namespace Eigen #endif // EIGEN_SPARSE_SELFADJOINTVIEW_H RcppEigen/inst/include/Eigen/src/SparseCore/SparseTriangularView.h0000644000176200001440000001346112253717461024721 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud // Copyright (C) 2012 Désiré Nuentsa-Wakam // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSE_TRIANGULARVIEW_H #define EIGEN_SPARSE_TRIANGULARVIEW_H namespace Eigen { namespace internal { template struct traits > : public traits {}; } // namespace internal template class SparseTriangularView : public SparseMatrixBase > { enum { SkipFirst = ((Mode&Lower) && !(MatrixType::Flags&RowMajorBit)) || ((Mode&Upper) && (MatrixType::Flags&RowMajorBit)), SkipLast = !SkipFirst, SkipDiag = (Mode&ZeroDiag) ? 1 : 0, HasUnitDiag = (Mode&UnitDiag) ? 
1 : 0 }; public: EIGEN_SPARSE_PUBLIC_INTERFACE(SparseTriangularView) class InnerIterator; class ReverseInnerIterator; inline Index rows() const { return m_matrix.rows(); } inline Index cols() const { return m_matrix.cols(); } typedef typename MatrixType::Nested MatrixTypeNested; typedef typename internal::remove_reference::type MatrixTypeNestedNonRef; typedef typename internal::remove_all::type MatrixTypeNestedCleaned; inline SparseTriangularView(const MatrixType& matrix) : m_matrix(matrix) {} /** \internal */ inline const MatrixTypeNestedCleaned& nestedExpression() const { return m_matrix; } template typename internal::plain_matrix_type_column_major::type solve(const MatrixBase& other) const; template void solveInPlace(MatrixBase& other) const; template void solveInPlace(SparseMatrixBase& other) const; protected: MatrixTypeNested m_matrix; }; template class SparseTriangularView::InnerIterator : public MatrixTypeNestedCleaned::InnerIterator { typedef typename MatrixTypeNestedCleaned::InnerIterator Base; typedef typename SparseTriangularView::Index Index; public: EIGEN_STRONG_INLINE InnerIterator(const SparseTriangularView& view, Index outer) : Base(view.nestedExpression(), outer), m_returnOne(false) { if(SkipFirst) { while((*this) && ((HasUnitDiag||SkipDiag) ? this->index()<=outer : this->index()=Base::outer())) { if((!SkipFirst) && Base::operator bool()) Base::operator++(); m_returnOne = true; } } EIGEN_STRONG_INLINE InnerIterator& operator++() { if(HasUnitDiag && m_returnOne) m_returnOne = false; else { Base::operator++(); if(HasUnitDiag && (!SkipFirst) && ((!Base::operator bool()) || Base::index()>=Base::outer())) { if((!SkipFirst) && Base::operator bool()) Base::operator++(); m_returnOne = true; } } return *this; } inline Index row() const { return (MatrixType::Flags&RowMajorBit ? Base::outer() : this->index()); } inline Index col() const { return (MatrixType::Flags&RowMajorBit ? this->index() : Base::outer()); } inline Index index() const { if(HasUnitDiag && m_returnOne) return Base::outer(); else return Base::index(); } inline Scalar value() const { if(HasUnitDiag && m_returnOne) return Scalar(1); else return Base::value(); } EIGEN_STRONG_INLINE operator bool() const { if(HasUnitDiag && m_returnOne) return true; if(SkipFirst) return Base::operator bool(); else { if (SkipDiag) return (Base::operator bool() && this->index() < this->outer()); else return (Base::operator bool() && this->index() <= this->outer()); } } protected: bool m_returnOne; }; template class SparseTriangularView::ReverseInnerIterator : public MatrixTypeNestedCleaned::ReverseInnerIterator { typedef typename MatrixTypeNestedCleaned::ReverseInnerIterator Base; typedef typename SparseTriangularView::Index Index; public: EIGEN_STRONG_INLINE ReverseInnerIterator(const SparseTriangularView& view, Index outer) : Base(view.nestedExpression(), outer) { eigen_assert((!HasUnitDiag) && "ReverseInnerIterator does not support yet triangular views with a unit diagonal"); if(SkipLast) { while((*this) && (SkipDiag ? 
this->index()>=outer : this->index()>outer)) --(*this); } } EIGEN_STRONG_INLINE ReverseInnerIterator& operator--() { Base::operator--(); return *this; } inline Index row() const { return Base::row(); } inline Index col() const { return Base::col(); } EIGEN_STRONG_INLINE operator bool() const { if (SkipLast) return Base::operator bool() ; else { if(SkipDiag) return (Base::operator bool() && this->index() > this->outer()); else return (Base::operator bool() && this->index() >= this->outer()); } } }; template template inline const SparseTriangularView SparseMatrixBase::triangularView() const { return derived(); } } // end namespace Eigen #endif // EIGEN_SPARSE_TRIANGULARVIEW_H RcppEigen/inst/include/Eigen/src/SparseCore/SparseMatrix.h0000644000176200001440000013010012647570720023212 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSEMATRIX_H #define EIGEN_SPARSEMATRIX_H namespace Eigen { /** \ingroup SparseCore_Module * * \class SparseMatrix * * \brief A versatible sparse matrix representation * * This class implements a more versatile variants of the common \em compressed row/column storage format. * Each colmun's (resp. row) non zeros are stored as a pair of value with associated row (resp. colmiun) index. * All the non zeros are stored in a single large buffer. Unlike the \em compressed format, there might be extra * space inbetween the nonzeros of two successive colmuns (resp. rows) such that insertion of new non-zero * can be done with limited memory reallocation and copies. * * A call to the function makeCompressed() turns the matrix into the standard \em compressed format * compatible with many library. * * More details on this storage sceheme are given in the \ref TutorialSparse "manual pages". * * \tparam _Scalar the scalar type, i.e. the type of the coefficients * \tparam _Options Union of bit flags controlling the storage scheme. Currently the only possibility * is ColMajor or RowMajor. The default is 0 which means column-major. * \tparam _Index the type of the indices. It has to be a \b signed type (e.g., short, int, std::ptrdiff_t). Default is \c int. * * This class can be extended with the help of the plugin mechanism described on the page * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEMATRIX_PLUGIN. 
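  *
  * A minimal usage sketch (illustrative addition; the name \c A and the values are
  * assumptions, not part of this documentation):
  * \code
  * Eigen::SparseMatrix<double> A(4,4);   // 4x4, column-major by default
  * A.insert(0,1) = 3.0;                  // insert() requires the entry to be new
  * A.insert(2,3) = -1.5;
  * A.makeCompressed();                   // switch to standard compressed (CSC) storage
  * \endcode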
*/ namespace internal { template struct traits > { typedef _Scalar Scalar; typedef _Index Index; typedef Sparse StorageKind; typedef MatrixXpr XprKind; enum { RowsAtCompileTime = Dynamic, ColsAtCompileTime = Dynamic, MaxRowsAtCompileTime = Dynamic, MaxColsAtCompileTime = Dynamic, Flags = _Options | NestByRefBit | LvalueBit, CoeffReadCost = NumTraits::ReadCost, SupportedAccessPatterns = InnerRandomAccessPattern }; }; template struct traits, DiagIndex> > { typedef SparseMatrix<_Scalar, _Options, _Index> MatrixType; typedef typename nested::type MatrixTypeNested; typedef typename remove_reference::type _MatrixTypeNested; typedef _Scalar Scalar; typedef Dense StorageKind; typedef _Index Index; typedef MatrixXpr XprKind; enum { RowsAtCompileTime = Dynamic, ColsAtCompileTime = 1, MaxRowsAtCompileTime = Dynamic, MaxColsAtCompileTime = 1, Flags = 0, CoeffReadCost = _MatrixTypeNested::CoeffReadCost*10 }; }; } // end namespace internal template class SparseMatrix : public SparseMatrixBase > { public: EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix) EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, +=) EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, -=) typedef MappedSparseMatrix Map; using Base::IsRowMajor; typedef internal::CompressedStorage Storage; enum { Options = _Options }; protected: typedef SparseMatrix TransposedSparseMatrix; Index m_outerSize; Index m_innerSize; Index* m_outerIndex; Index* m_innerNonZeros; // optional, if null then the data is compressed Storage m_data; Eigen::Map > innerNonZeros() { return Eigen::Map >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); } const Eigen::Map > innerNonZeros() const { return Eigen::Map >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); } public: /** \returns whether \c *this is in compressed form. */ inline bool isCompressed() const { return m_innerNonZeros==0; } /** \returns the number of rows of the matrix */ inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; } /** \returns the number of columns of the matrix */ inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; } /** \returns the number of rows (resp. columns) of the matrix if the storage order column major (resp. row major) */ inline Index innerSize() const { return m_innerSize; } /** \returns the number of columns (resp. rows) of the matrix if the storage order column major (resp. row major) */ inline Index outerSize() const { return m_outerSize; } /** \returns a const pointer to the array of values. * This function is aimed at interoperability with other libraries. * \sa innerIndexPtr(), outerIndexPtr() */ inline const Scalar* valuePtr() const { return &m_data.value(0); } /** \returns a non-const pointer to the array of values. * This function is aimed at interoperability with other libraries. * \sa innerIndexPtr(), outerIndexPtr() */ inline Scalar* valuePtr() { return &m_data.value(0); } /** \returns a const pointer to the array of inner indices. * This function is aimed at interoperability with other libraries. * \sa valuePtr(), outerIndexPtr() */ inline const Index* innerIndexPtr() const { return &m_data.index(0); } /** \returns a non-const pointer to the array of inner indices. * This function is aimed at interoperability with other libraries. * \sa valuePtr(), outerIndexPtr() */ inline Index* innerIndexPtr() { return &m_data.index(0); } /** \returns a const pointer to the array of the starting positions of the inner vectors. * This function is aimed at interoperability with other libraries. 
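  *
  * Hedged sketch of raw traversal in compressed, column-major mode (illustrative
  * addition; \c A is an assumed compressed SparseMatrix<double>):
  * \code
  * for (int j = 0; j < A.outerSize(); ++j)
  *   for (int p = A.outerIndexPtr()[j]; p < A.outerIndexPtr()[j+1]; ++p)
  *     std::printf("(%d,%d) = %g\n", A.innerIndexPtr()[p], j, A.valuePtr()[p]);
  * \endcode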
* \sa valuePtr(), innerIndexPtr() */ inline const Index* outerIndexPtr() const { return m_outerIndex; } /** \returns a non-const pointer to the array of the starting positions of the inner vectors. * This function is aimed at interoperability with other libraries. * \sa valuePtr(), innerIndexPtr() */ inline Index* outerIndexPtr() { return m_outerIndex; } /** \returns a const pointer to the array of the number of non zeros of the inner vectors. * This function is aimed at interoperability with other libraries. * \warning it returns the null pointer 0 in compressed mode */ inline const Index* innerNonZeroPtr() const { return m_innerNonZeros; } /** \returns a non-const pointer to the array of the number of non zeros of the inner vectors. * This function is aimed at interoperability with other libraries. * \warning it returns the null pointer 0 in compressed mode */ inline Index* innerNonZeroPtr() { return m_innerNonZeros; } /** \internal */ inline Storage& data() { return m_data; } /** \internal */ inline const Storage& data() const { return m_data; } /** \returns the value of the matrix at position \a i, \a j * This function returns Scalar(0) if the element is an explicit \em zero */ inline Scalar coeff(Index row, Index col) const { eigen_assert(row>=0 && row=0 && col=0 && row=0 && col=start && "you probably called coeffRef on a non finalized matrix"); if(end<=start) return insert(row,col); const Index p = m_data.searchLowerIndex(start,end-1,inner); if((p=0 && row=0 && col::Constant(outerSize(), 2)); } return insertUncompressed(row,col); } public: class InnerIterator; class ReverseInnerIterator; /** Removes all non zeros but keep allocated memory */ inline void setZero() { m_data.clear(); memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index)); if(m_innerNonZeros) memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(Index)); } /** \returns the number of non zero coefficients */ inline Index nonZeros() const { if(m_innerNonZeros) return innerNonZeros().sum(); return static_cast(m_data.size()); } /** Preallocates \a reserveSize non zeros. * * Precondition: the matrix must be in compressed mode. */ inline void reserve(Index reserveSize) { eigen_assert(isCompressed() && "This function does not make sense in non compressed mode."); m_data.reserve(reserveSize); } #ifdef EIGEN_PARSED_BY_DOXYGEN /** Preallocates \a reserveSize[\c j] non zeros for each column (resp. row) \c j. * * This function turns the matrix in non-compressed mode */ template inline void reserve(const SizesType& reserveSizes); #else template inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif = typename SizesType::value_type()) { EIGEN_UNUSED_VARIABLE(enableif); reserveInnerVectors(reserveSizes); } template inline void reserve(const SizesType& reserveSizes, const typename SizesType::Scalar& enableif = #if (!defined(_MSC_VER)) || (_MSC_VER>=1500) // MSVC 2005 fails to compile with this typename typename #endif SizesType::Scalar()) { EIGEN_UNUSED_VARIABLE(enableif); reserveInnerVectors(reserveSizes); } #endif // EIGEN_PARSED_BY_DOXYGEN protected: template inline void reserveInnerVectors(const SizesType& reserveSizes) { if(isCompressed()) { std::size_t totalReserveSize = 0; // turn the matrix into non-compressed mode m_innerNonZeros = static_cast(std::malloc(m_outerSize * sizeof(Index))); if (!m_innerNonZeros) internal::throw_std_bad_alloc(); // temporarily use m_innerSizes to hold the new starting points. 
Index* newOuterIndex = m_innerNonZeros; Index count = 0; for(Index j=0; j=0; --j) { Index innerNNZ = previousOuterIndex - m_outerIndex[j]; for(Index i=innerNNZ-1; i>=0; --i) { m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i); m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i); } previousOuterIndex = m_outerIndex[j]; m_outerIndex[j] = newOuterIndex[j]; m_innerNonZeros[j] = innerNNZ; } m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1]; m_data.resize(m_outerIndex[m_outerSize]); } else { Index* newOuterIndex = static_cast(std::malloc((m_outerSize+1)*sizeof(Index))); if (!newOuterIndex) internal::throw_std_bad_alloc(); Index count = 0; for(Index j=0; j(reserveSizes[j], alreadyReserved); count += toReserve + m_innerNonZeros[j]; } newOuterIndex[m_outerSize] = count; m_data.resize(count); for(Index j=m_outerSize-1; j>=0; --j) { Index offset = newOuterIndex[j] - m_outerIndex[j]; if(offset>0) { Index innerNNZ = m_innerNonZeros[j]; for(Index i=innerNNZ-1; i>=0; --i) { m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i); m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i); } } } std::swap(m_outerIndex, newOuterIndex); std::free(newOuterIndex); } } public: //--- low level purely coherent filling --- /** \internal * \returns a reference to the non zero coefficient at position \a row, \a col assuming that: * - the nonzero does not already exist * - the new coefficient is the last one according to the storage order * * Before filling a given inner vector you must call the statVec(Index) function. * * After an insertion session, you should call the finalize() function. * * \sa insert, insertBackByOuterInner, startVec */ inline Scalar& insertBack(Index row, Index col) { return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row); } /** \internal * \sa insertBack, startVec */ inline Scalar& insertBackByOuterInner(Index outer, Index inner) { eigen_assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)"); eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)(m_data.size()); Index i = m_outerSize; // find the last filled column while (i>=0 && m_outerIndex[i]==0) --i; ++i; while (i<=m_outerSize) { m_outerIndex[i] = size; ++i; } } } //--- template void setFromTriplets(const InputIterators& begin, const InputIterators& end); void sumupDuplicates(); //--- /** \internal * same as insert(Index,Index) except that the indices are given relative to the storage order */ Scalar& insertByOuterInner(Index j, Index i) { return insert(IsRowMajor ? j : i, IsRowMajor ? i : j); } /** Turns the matrix into the \em compressed format. */ void makeCompressed() { if(isCompressed()) return; Index oldStart = m_outerIndex[1]; m_outerIndex[1] = m_innerNonZeros[0]; for(Index j=1; j0) { for(Index k=0; k(std::malloc(m_outerSize * sizeof(Index))); for (Index i = 0; i < m_outerSize; i++) { m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i]; } } /** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerence \a epsilon */ void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits::dummy_precision()) { prune(default_prunning_func(reference,epsilon)); } /** Turns the matrix into compressed format, and suppresses all nonzeros which do not satisfy the predicate \a keep. 
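  * For instance (illustrative sketch added here; \c A and \c KeepLower are assumed
  * names), keeping only the strictly lower triangular entries could be written as:
  * \code
  * struct KeepLower {
  *   bool operator()(const int& row, const int& col, const double&) const
  *   { return row > col; }
  * };
  * A.prune(KeepLower());
  * \endcode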
* The functor type \a KeepFunc must implement the following function: * \code * bool operator() (const Index& row, const Index& col, const Scalar& value) const; * \endcode * \sa prune(Scalar,RealScalar) */ template void prune(const KeepFunc& keep = KeepFunc()) { // TODO optimize the uncompressed mode to avoid moving and allocating the data twice // TODO also implement a unit test makeCompressed(); Index k = 0; for(Index j=0; jrows() == rows && this->cols() == cols) return; // If one dimension is null, then there is nothing to be preserved if(rows==0 || cols==0) return resize(rows,cols); Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows(); Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols(); Index newInnerSize = IsRowMajor ? cols : rows; // Deals with inner non zeros if (m_innerNonZeros) { // Resize m_innerNonZeros Index *newInnerNonZeros = static_cast(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(Index))); if (!newInnerNonZeros) internal::throw_std_bad_alloc(); m_innerNonZeros = newInnerNonZeros; for(Index i=m_outerSize; i(std::malloc((m_outerSize+outerChange+1) * sizeof(Index))); if (!m_innerNonZeros) internal::throw_std_bad_alloc(); for(Index i = 0; i < m_outerSize; i++) m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i]; } // Change the m_innerNonZeros in case of a decrease of inner size if (m_innerNonZeros && innerChange < 0) { for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++) { Index &n = m_innerNonZeros[i]; Index start = m_outerIndex[i]; while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n; } } m_innerSize = newInnerSize; // Re-allocate outer index structure if necessary if (outerChange == 0) return; Index *newOuterIndex = static_cast(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(Index))); if (!newOuterIndex) internal::throw_std_bad_alloc(); m_outerIndex = newOuterIndex; if (outerChange > 0) { Index last = m_outerSize == 0 ? 
0 : m_outerIndex[m_outerSize]; for(Index i=m_outerSize; i(std::malloc((outerSize + 1) * sizeof(Index))); if (!m_outerIndex) internal::throw_std_bad_alloc(); m_outerSize = outerSize; } if(m_innerNonZeros) { std::free(m_innerNonZeros); m_innerNonZeros = 0; } memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index)); } /** \internal * Resize the nonzero vector to \a size */ void resizeNonZeros(Index size) { // TODO remove this function m_data.resize(size); } /** \returns a const expression of the diagonal coefficients */ const Diagonal diagonal() const { return *this; } /** Default constructor yielding an empty \c 0 \c x \c 0 matrix */ inline SparseMatrix() : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) { check_template_parameters(); resize(0, 0); } /** Constructs a \a rows \c x \a cols empty matrix */ inline SparseMatrix(Index rows, Index cols) : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) { check_template_parameters(); resize(rows, cols); } /** Constructs a sparse matrix from the sparse expression \a other */ template inline SparseMatrix(const SparseMatrixBase& other) : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) { EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) check_template_parameters(); *this = other.derived(); } /** Constructs a sparse matrix from the sparse selfadjoint view \a other */ template inline SparseMatrix(const SparseSelfAdjointView& other) : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) { check_template_parameters(); *this = other; } /** Copy constructor (it performs a deep copy) */ inline SparseMatrix(const SparseMatrix& other) : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) { check_template_parameters(); *this = other.derived(); } /** \brief Copy constructor with in-place evaluation */ template SparseMatrix(const ReturnByValue& other) : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) { check_template_parameters(); initAssignment(other); other.evalTo(*this); } /** Swaps the content of two sparse matrices of the same type. * This is a fast operation that simply swaps the underlying pointers and parameters. */ inline void swap(SparseMatrix& other) { //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n"); std::swap(m_outerIndex, other.m_outerIndex); std::swap(m_innerSize, other.m_innerSize); std::swap(m_outerSize, other.m_outerSize); std::swap(m_innerNonZeros, other.m_innerNonZeros); m_data.swap(other.m_data); } /** Sets *this to the identity matrix. * This function also turns the matrix into compressed mode, and drop any reserved memory. 
*/ inline void setIdentity() { eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES"); this->m_data.resize(rows()); Eigen::Map >(&this->m_data.index(0), rows()).setLinSpaced(0, rows()-1); Eigen::Map >(&this->m_data.value(0), rows()).setOnes(); Eigen::Map >(this->m_outerIndex, rows()+1).setLinSpaced(0, rows()); std::free(m_innerNonZeros); m_innerNonZeros = 0; } inline SparseMatrix& operator=(const SparseMatrix& other) { if (other.isRValue()) { swap(other.const_cast_derived()); } else if(this!=&other) { initAssignment(other); if(other.isCompressed()) { memcpy(m_outerIndex, other.m_outerIndex, (m_outerSize+1)*sizeof(Index)); m_data = other.m_data; } else { Base::operator=(other); } } return *this; } #ifndef EIGEN_PARSED_BY_DOXYGEN template inline SparseMatrix& operator=(const SparseSparseProduct& product) { return Base::operator=(product); } template inline SparseMatrix& operator=(const ReturnByValue& other) { initAssignment(other); return Base::operator=(other.derived()); } template inline SparseMatrix& operator=(const EigenBase& other) { return Base::operator=(other.derived()); } #endif template EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase& other); friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m) { EIGEN_DBG_SPARSE( s << "Nonzero entries:\n"; if(m.isCompressed()) for (Index i=0; i&>(m); return s; } /** Destructor */ inline ~SparseMatrix() { std::free(m_outerIndex); std::free(m_innerNonZeros); } #ifndef EIGEN_PARSED_BY_DOXYGEN /** Overloaded for performance */ Scalar sum() const; #endif # ifdef EIGEN_SPARSEMATRIX_PLUGIN # include EIGEN_SPARSEMATRIX_PLUGIN # endif protected: template void initAssignment(const Other& other) { resize(other.rows(), other.cols()); if(m_innerNonZeros) { std::free(m_innerNonZeros); m_innerNonZeros = 0; } } /** \internal * \sa insert(Index,Index) */ EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col); /** \internal * A vector object that is equal to 0 everywhere but v at the position i */ class SingletonVector { Index m_index; Index m_value; public: typedef Index value_type; SingletonVector(Index i, Index v) : m_index(i), m_value(v) {} Index operator[](Index i) const { return i==m_index ? m_value : 0; } }; /** \internal * \sa insert(Index,Index) */ EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col); public: /** \internal * \sa insert(Index,Index) */ EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col) { const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? 
col : row; eigen_assert(!isCompressed()); eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer])); Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++; m_data.index(p) = inner; return (m_data.value(p) = 0); } private: static void check_template_parameters() { EIGEN_STATIC_ASSERT(NumTraits::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE); EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS); } struct default_prunning_func { default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {} inline bool operator() (const Index&, const Index&, const Scalar& value) const { return !internal::isMuchSmallerThan(value, reference, epsilon); } Scalar reference; RealScalar epsilon; }; }; template class SparseMatrix::InnerIterator { public: InnerIterator(const SparseMatrix& mat, Index outer) : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_id(mat.m_outerIndex[outer]) { if(mat.isCompressed()) m_end = mat.m_outerIndex[outer+1]; else m_end = m_id + mat.m_innerNonZeros[outer]; } inline InnerIterator& operator++() { m_id++; return *this; } inline const Scalar& value() const { return m_values[m_id]; } inline Scalar& valueRef() { return const_cast(m_values[m_id]); } inline Index index() const { return m_indices[m_id]; } inline Index outer() const { return m_outer; } inline Index row() const { return IsRowMajor ? m_outer : index(); } inline Index col() const { return IsRowMajor ? index() : m_outer; } inline operator bool() const { return (m_id < m_end); } protected: const Scalar* m_values; const Index* m_indices; const Index m_outer; Index m_id; Index m_end; }; template class SparseMatrix::ReverseInnerIterator { public: ReverseInnerIterator(const SparseMatrix& mat, Index outer) : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_start(mat.m_outerIndex[outer]) { if(mat.isCompressed()) m_id = mat.m_outerIndex[outer+1]; else m_id = m_start + mat.m_innerNonZeros[outer]; } inline ReverseInnerIterator& operator--() { --m_id; return *this; } inline const Scalar& value() const { return m_values[m_id-1]; } inline Scalar& valueRef() { return const_cast(m_values[m_id-1]); } inline Index index() const { return m_indices[m_id-1]; } inline Index outer() const { return m_outer; } inline Index row() const { return IsRowMajor ? m_outer : index(); } inline Index col() const { return IsRowMajor ? 
index() : m_outer; } inline operator bool() const { return (m_id > m_start); } protected: const Scalar* m_values; const Index* m_indices; const Index m_outer; Index m_id; const Index m_start; }; namespace internal { template void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, int Options = 0) { EIGEN_UNUSED_VARIABLE(Options); enum { IsRowMajor = SparseMatrixType::IsRowMajor }; typedef typename SparseMatrixType::Scalar Scalar; typedef typename SparseMatrixType::Index Index; SparseMatrix trMat(mat.rows(),mat.cols()); if(begin!=end) { // pass 1: count the nnz per inner-vector Matrix wi(trMat.outerSize()); wi.setZero(); for(InputIterator it(begin); it!=end; ++it) { eigen_assert(it->row()>=0 && it->row()col()>=0 && it->col()col() : it->row())++; } // pass 2: insert all the elements into trMat trMat.reserve(wi); for(InputIterator it(begin); it!=end; ++it) trMat.insertBackUncompressed(it->row(),it->col()) = it->value(); // pass 3: trMat.sumupDuplicates(); } // pass 4: transposed copy -> implicit sorting mat = trMat; } } /** Fill the matrix \c *this with the list of \em triplets defined by the iterator range \a begin - \a end. * * A \em triplet is a tuple (i,j,value) defining a non-zero element. * The input list of triplets does not have to be sorted, and can contains duplicated elements. * In any case, the result is a \b sorted and \b compressed sparse matrix where the duplicates have been summed up. * This is a \em O(n) operation, with \em n the number of triplet elements. * The initial contents of \c *this is destroyed. * The matrix \c *this must be properly resized beforehand using the SparseMatrix(Index,Index) constructor, * or the resize(Index,Index) method. The sizes are not extracted from the triplet list. * * The \a InputIterators value_type must provide the following interface: * \code * Scalar value() const; // the value * Scalar row() const; // the row index i * Scalar col() const; // the column index j * \endcode * See for instance the Eigen::Triplet template class. * * Here is a typical usage example: * \code typedef Triplet T; std::vector tripletList; triplets.reserve(estimation_of_entries); for(...) { // ... tripletList.push_back(T(i,j,v_ij)); } SparseMatrixType m(rows,cols); m.setFromTriplets(tripletList.begin(), tripletList.end()); // m is ready to go! * \endcode * * \warning The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define * an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather * be explicitely stored into a std::vector for instance. 
*/ template template void SparseMatrix::setFromTriplets(const InputIterators& begin, const InputIterators& end) { internal::set_from_triplets(begin, end, *this); } /** \internal */ template void SparseMatrix::sumupDuplicates() { eigen_assert(!isCompressed()); // TODO, in practice we should be able to use m_innerNonZeros for that task Matrix wi(innerSize()); wi.fill(-1); Index count = 0; // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers for(Index j=0; j=start) { // we already meet this entry => accumulate it m_data.value(wi(i)) += m_data.value(k); } else { m_data.value(count) = m_data.value(k); m_data.index(count) = m_data.index(k); wi(i) = count; ++count; } } m_outerIndex[j] = start; } m_outerIndex[m_outerSize] = count; // turn the matrix into compressed form std::free(m_innerNonZeros); m_innerNonZeros = 0; m_data.resize(m_outerIndex[m_outerSize]); } template template EIGEN_DONT_INLINE SparseMatrix& SparseMatrix::operator=(const SparseMatrixBase& other) { EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit); if (needToTranspose) { // two passes algorithm: // 1 - compute the number of coeffs per dest inner vector // 2 - do the actual copy/eval // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed typedef typename internal::nested::type OtherCopy; typedef typename internal::remove_all::type _OtherCopy; OtherCopy otherCopy(other.derived()); SparseMatrix dest(other.rows(),other.cols()); Eigen::Map > (dest.m_outerIndex,dest.outerSize()).setZero(); // pass 1 // FIXME the above copy could be merged with that pass for (Index j=0; j positions(dest.outerSize()); for (Index j=0; jswap(dest); return *this; } else { if(other.isRValue()) initAssignment(other.derived()); // there is no special optimization return Base::operator=(other.derived()); } } template EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertUncompressed(Index row, Index col) { eigen_assert(!isCompressed()); const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? col : row; Index room = m_outerIndex[outer+1] - m_outerIndex[outer]; Index innerNNZ = m_innerNonZeros[outer]; if(innerNNZ>=room) { // this inner vector is full, we need to reallocate the whole buffer :( reserve(SingletonVector(outer,std::max(2,innerNNZ))); } Index startId = m_outerIndex[outer]; Index p = startId + m_innerNonZeros[outer]; while ( (p > startId) && (m_data.index(p-1) > inner) ) { m_data.index(p) = m_data.index(p-1); m_data.value(p) = m_data.value(p-1); --p; } eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exist, you must call coeffRef to this end"); m_innerNonZeros[outer]++; m_data.index(p) = inner; return (m_data.value(p) = 0); } template EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertCompressed(Index row, Index col) { eigen_assert(isCompressed()); const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? 
col : row; Index previousOuter = outer; if (m_outerIndex[outer+1]==0) { // we start a new inner vector while (previousOuter>=0 && m_outerIndex[previousOuter]==0) { m_outerIndex[previousOuter] = static_cast(m_data.size()); --previousOuter; } m_outerIndex[outer+1] = m_outerIndex[outer]; } // here we have to handle the tricky case where the outerIndex array // starts with: [ 0 0 0 0 0 1 ...] and we are inserted in, e.g., // the 2nd inner vector... bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0)) && (size_t(m_outerIndex[outer+1]) == m_data.size()); size_t startId = m_outerIndex[outer]; // FIXME let's make sure sizeof(long int) == sizeof(size_t) size_t p = m_outerIndex[outer+1]; ++m_outerIndex[outer+1]; double reallocRatio = 1; if (m_data.allocatedSize()<=m_data.size()) { // if there is no preallocated memory, let's reserve a minimum of 32 elements if (m_data.size()==0) { m_data.reserve(32); } else { // we need to reallocate the data, to reduce multiple reallocations // we use a smart resize algorithm based on the current filling ratio // in addition, we use double to avoid integers overflows double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1); reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size()); // furthermore we bound the realloc ratio to: // 1) reduce multiple minor realloc when the matrix is almost filled // 2) avoid to allocate too much memory when the matrix is almost empty reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.); } } m_data.resize(m_data.size()+1,reallocRatio); if (!isLastVec) { if (previousOuter==-1) { // oops wrong guess. // let's correct the outer offsets for (Index k=0; k<=(outer+1); ++k) m_outerIndex[k] = 0; Index k=outer+1; while(m_outerIndex[k]==0) m_outerIndex[k++] = 1; while (k<=m_outerSize && m_outerIndex[k]!=0) m_outerIndex[k++]++; p = 0; --k; k = m_outerIndex[k]-1; while (k>0) { m_data.index(k) = m_data.index(k-1); m_data.value(k) = m_data.value(k-1); k--; } } else { // we are not inserting into the last inner vec // update outer indices: Index j = outer+2; while (j<=m_outerSize && m_outerIndex[j]!=0) m_outerIndex[j++]++; --j; // shift data of last vecs: Index k = m_outerIndex[j]-1; while (k>=Index(p)) { m_data.index(k) = m_data.index(k-1); m_data.value(k) = m_data.value(k-1); k--; } } } while ( (p > startId) && (m_data.index(p-1) > inner) ) { m_data.index(p) = m_data.index(p-1); m_data.value(p) = m_data.value(p-1); --p; } m_data.index(p) = inner; return (m_data.value(p) = 0); } } // end namespace Eigen #endif // EIGEN_SPARSEMATRIX_H RcppEigen/inst/include/Eigen/src/SparseCore/SparseFuzzy.h0000644000176200001440000000173012253717461023101 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_SPARSE_FUZZY_H #define EIGEN_SPARSE_FUZZY_H // template // template // bool SparseMatrixBase::isApprox( // const OtherDerived& other, // typename NumTraits::Real prec // ) const // { // const typename internal::nested::type nested(derived()); // const typename internal::nested::type otherNested(other.derived()); // return (nested - otherNested).cwise().abs2().sum() // <= prec * prec * (std::min)(nested.cwise().abs2().sum(), otherNested.cwise().abs2().sum()); // } #endif // EIGEN_SPARSE_FUZZY_H RcppEigen/inst/include/Eigen/src/SparseCore/TriangularSolver.h0000644000176200001440000002355312550462273024104 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSETRIANGULARSOLVER_H #define EIGEN_SPARSETRIANGULARSOLVER_H namespace Eigen { namespace internal { template::Flags) & RowMajorBit> struct sparse_solve_triangular_selector; // forward substitution, row-major template struct sparse_solve_triangular_selector { typedef typename Rhs::Scalar Scalar; static void run(const Lhs& lhs, Rhs& other) { for(int col=0 ; col struct sparse_solve_triangular_selector { typedef typename Rhs::Scalar Scalar; static void run(const Lhs& lhs, Rhs& other) { for(int col=0 ; col=0 ; --i) { Scalar tmp = other.coeff(i,col); Scalar l_ii(0); typename Lhs::InnerIterator it(lhs, i); while(it && it.index() struct sparse_solve_triangular_selector { typedef typename Rhs::Scalar Scalar; static void run(const Lhs& lhs, Rhs& other) { for(int col=0 ; col struct sparse_solve_triangular_selector { typedef typename Rhs::Scalar Scalar; static void run(const Lhs& lhs, Rhs& other) { for(int col=0 ; col=0; --i) { Scalar& tmp = other.coeffRef(i,col); if (tmp!=Scalar(0)) // optimization when other is actually sparse { if(!(Mode & UnitDiag)) { // TODO replace this by a binary search. 
make sure the binary search is safe for partially sorted elements typename Lhs::ReverseInnerIterator it(lhs, i); while(it && it.index()!=i) --it; eigen_assert(it && it.index()==i); other.coeffRef(i,col) /= it.value(); } typename Lhs::InnerIterator it(lhs, i); for(; it && it.index() template void SparseTriangularView::solveInPlace(MatrixBase& other) const { eigen_assert(m_matrix.cols() == m_matrix.rows() && m_matrix.cols() == other.rows()); eigen_assert((!(Mode & ZeroDiag)) && bool(Mode & (Upper|Lower))); enum { copy = internal::traits::Flags & RowMajorBit }; typedef typename internal::conditional::type, OtherDerived&>::type OtherCopy; OtherCopy otherCopy(other.derived()); internal::sparse_solve_triangular_selector::type, Mode>::run(m_matrix, otherCopy); if (copy) other = otherCopy; } template template typename internal::plain_matrix_type_column_major::type SparseTriangularView::solve(const MatrixBase& other) const { typename internal::plain_matrix_type_column_major::type res(other); solveInPlace(res); return res; } // pure sparse path namespace internal { template struct sparse_solve_triangular_sparse_selector; // forward substitution, col-major template struct sparse_solve_triangular_sparse_selector { typedef typename Rhs::Scalar Scalar; typedef typename promote_index_type::Index, typename traits::Index>::type Index; static void run(const Lhs& lhs, Rhs& other) { const bool IsLower = (UpLo==Lower); AmbiVector tempVector(other.rows()*2); tempVector.setBounds(0,other.rows()); Rhs res(other.rows(), other.cols()); res.reserve(other.nonZeros()); for(int col=0 ; col=0; i+=IsLower?1:-1) { tempVector.restart(); Scalar& ci = tempVector.coeffRef(i); if (ci!=Scalar(0)) { // find typename Lhs::InnerIterator it(lhs, i); if(!(Mode & UnitDiag)) { if (IsLower) { eigen_assert(it.index()==i); ci /= it.value(); } else ci /= lhs.coeff(i,i); } tempVector.restart(); if (IsLower) { if (it.index()==i) ++it; for(; it; ++it) tempVector.coeffRef(it.index()) -= ci * it.value(); } else { for(; it && it.index()::Iterator it(tempVector/*,1e-12*/); it; ++it) { ++ count; // std::cerr << "fill " << it.index() << ", " << col << "\n"; // std::cout << it.value() << " "; // FIXME use insertBack res.insert(it.index(), col) = it.value(); } // std::cout << "tempVector.nonZeros() == " << int(count) << " / " << (other.rows()) << "\n"; } res.finalize(); other = res.markAsRValue(); } }; } // end namespace internal template template void SparseTriangularView::solveInPlace(SparseMatrixBase& other) const { eigen_assert(m_matrix.cols() == m_matrix.rows() && m_matrix.cols() == other.rows()); eigen_assert( (!(Mode & ZeroDiag)) && bool(Mode & (Upper|Lower))); // enum { copy = internal::traits::Flags & RowMajorBit }; // typedef typename internal::conditional::type, OtherDerived&>::type OtherCopy; // OtherCopy otherCopy(other.derived()); internal::sparse_solve_triangular_sparse_selector::run(m_matrix, other.derived()); // if (copy) // other = otherCopy; } #ifdef EIGEN2_SUPPORT // deprecated stuff: /** \deprecated */ template template void SparseMatrixBase::solveTriangularInPlace(MatrixBase& other) const { this->template triangular().solveInPlace(other); } /** \deprecated */ template template typename internal::plain_matrix_type_column_major::type SparseMatrixBase::solveTriangular(const MatrixBase& other) const { typename internal::plain_matrix_type_column_major::type res(other); derived().solveTriangularInPlace(res); return res; } #endif // EIGEN2_SUPPORT } // end namespace Eigen #endif // EIGEN_SPARSETRIANGULARSOLVER_H 
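// Illustrative usage sketch (added commentary, not part of Eigen or RcppEigen): the
// sparse triangular solvers defined above are reached through triangularView(). The
// matrix L, the vector b and their values are assumptions chosen for this example.
#include <Eigen/Sparse>
#include <Eigen/Dense>

int main() {
  Eigen::SparseMatrix<double> L(3,3);        // lower-triangular sparse matrix
  L.insert(0,0) = 2.0; L.insert(1,0) = 1.0;
  L.insert(1,1) = 3.0; L.insert(2,2) = 4.0;
  L.makeCompressed();
  Eigen::VectorXd b(3); b << 2.0, 7.0, 8.0;
  L.triangularView<Eigen::Lower>().solveInPlace(b);  // forward substitution: b becomes x = (1, 2, 2)
  return 0;
}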
RcppEigen/inst/include/Eigen/src/SparseCore/SparseRedux.h0000644000176200001440000000303012501315103023013 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSEREDUX_H #define EIGEN_SPARSEREDUX_H namespace Eigen { template typename internal::traits::Scalar SparseMatrixBase::sum() const { eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix"); Scalar res(0); for (Index j=0; j typename internal::traits >::Scalar SparseMatrix<_Scalar,_Options,_Index>::sum() const { eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix"); return Matrix::Map(&m_data.value(0), m_data.size()).sum(); } template typename internal::traits >::Scalar SparseVector<_Scalar,_Options,_Index>::sum() const { eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix"); return Matrix::Map(&m_data.value(0), m_data.size()).sum(); } } // end namespace Eigen #endif // EIGEN_SPARSEREDUX_H RcppEigen/inst/include/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h0000644000176200001440000002104312305236565027137 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2011 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H #define EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H namespace Eigen { namespace internal { template static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res) { typedef typename remove_all::type::Scalar Scalar; typedef typename remove_all::type::Index Index; // make sure to call innerSize/outerSize since we fake the storage order. Index rows = lhs.innerSize(); Index cols = rhs.outerSize(); eigen_assert(lhs.outerSize() == rhs.innerSize()); std::vector mask(rows,false); Matrix values(rows); Matrix indices(rows); // estimate the number of non zero entries // given a rhs column containing Y non zeros, we assume that the respective Y columns // of the lhs differs in average of one non zeros, thus the number of non zeros for // the product of a rhs column with the lhs is X+Y where X is the average number of non zero // per column of the lhs. // Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs) Index estimated_nnz_prod = lhs.nonZeros() + rhs.nonZeros(); res.setZero(); res.reserve(Index(estimated_nnz_prod)); // we compute each column of the result, one after the other for (Index j=0; j use a quick sort // otherwise => loop through the entire vector // In order to avoid to perform an expensive log2 when the // result is clearly very sparse we use a linear bound up to 200. //if((nnz<200 && nnz1) std::sort(indices.data(),indices.data()+nnz); for(Index k=0; k::Flags&RowMajorBit) ? RowMajor : ColMajor, int RhsStorageOrder = (traits::Flags&RowMajorBit) ? RowMajor : ColMajor, int ResStorageOrder = (traits::Flags&RowMajorBit) ? 
RowMajor : ColMajor> struct conservative_sparse_sparse_product_selector; template struct conservative_sparse_sparse_product_selector { typedef typename remove_all::type LhsCleaned; typedef typename LhsCleaned::Scalar Scalar; static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) { typedef SparseMatrix RowMajorMatrix; typedef SparseMatrix ColMajorMatrix; ColMajorMatrix resCol(lhs.rows(),rhs.cols()); internal::conservative_sparse_sparse_product_impl(lhs, rhs, resCol); // sort the non zeros: RowMajorMatrix resRow(resCol); res = resRow; } }; template struct conservative_sparse_sparse_product_selector { static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) { typedef SparseMatrix RowMajorMatrix; RowMajorMatrix rhsRow = rhs; RowMajorMatrix resRow(lhs.rows(), rhs.cols()); internal::conservative_sparse_sparse_product_impl(rhsRow, lhs, resRow); res = resRow; } }; template struct conservative_sparse_sparse_product_selector { static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) { typedef SparseMatrix RowMajorMatrix; RowMajorMatrix lhsRow = lhs; RowMajorMatrix resRow(lhs.rows(), rhs.cols()); internal::conservative_sparse_sparse_product_impl(rhs, lhsRow, resRow); res = resRow; } }; template struct conservative_sparse_sparse_product_selector { static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) { typedef SparseMatrix RowMajorMatrix; RowMajorMatrix resRow(lhs.rows(), rhs.cols()); internal::conservative_sparse_sparse_product_impl(rhs, lhs, resRow); res = resRow; } }; template struct conservative_sparse_sparse_product_selector { typedef typename traits::type>::Scalar Scalar; static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) { typedef SparseMatrix ColMajorMatrix; ColMajorMatrix resCol(lhs.rows(), rhs.cols()); internal::conservative_sparse_sparse_product_impl(lhs, rhs, resCol); res = resCol; } }; template struct conservative_sparse_sparse_product_selector { static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) { typedef SparseMatrix ColMajorMatrix; ColMajorMatrix lhsCol = lhs; ColMajorMatrix resCol(lhs.rows(), rhs.cols()); internal::conservative_sparse_sparse_product_impl(lhsCol, rhs, resCol); res = resCol; } }; template struct conservative_sparse_sparse_product_selector { static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) { typedef SparseMatrix ColMajorMatrix; ColMajorMatrix rhsCol = rhs; ColMajorMatrix resCol(lhs.rows(), rhs.cols()); internal::conservative_sparse_sparse_product_impl(lhs, rhsCol, resCol); res = resCol; } }; template struct conservative_sparse_sparse_product_selector { static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) { typedef SparseMatrix RowMajorMatrix; typedef SparseMatrix ColMajorMatrix; RowMajorMatrix resRow(lhs.rows(),rhs.cols()); internal::conservative_sparse_sparse_product_impl(rhs, lhs, resRow); // sort the non zeros: ColMajorMatrix resCol(resRow); res = resCol; } }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H RcppEigen/inst/include/Eigen/src/SparseCore/SparseDenseProduct.h0000644000176200001440000002533612550462273024357 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_SPARSEDENSEPRODUCT_H #define EIGEN_SPARSEDENSEPRODUCT_H namespace Eigen { template struct SparseDenseProductReturnType { typedef SparseTimeDenseProduct Type; }; template struct SparseDenseProductReturnType { typedef typename internal::conditional< Lhs::IsRowMajor, SparseDenseOuterProduct, SparseDenseOuterProduct >::type Type; }; template struct DenseSparseProductReturnType { typedef DenseTimeSparseProduct Type; }; template struct DenseSparseProductReturnType { typedef typename internal::conditional< Rhs::IsRowMajor, SparseDenseOuterProduct, SparseDenseOuterProduct >::type Type; }; namespace internal { template struct traits > { typedef Sparse StorageKind; typedef typename scalar_product_traits::Scalar, typename traits::Scalar>::ReturnType Scalar; typedef typename Lhs::Index Index; typedef typename Lhs::Nested LhsNested; typedef typename Rhs::Nested RhsNested; typedef typename remove_all::type _LhsNested; typedef typename remove_all::type _RhsNested; enum { LhsCoeffReadCost = traits<_LhsNested>::CoeffReadCost, RhsCoeffReadCost = traits<_RhsNested>::CoeffReadCost, RowsAtCompileTime = Tr ? int(traits::RowsAtCompileTime) : int(traits::RowsAtCompileTime), ColsAtCompileTime = Tr ? int(traits::ColsAtCompileTime) : int(traits::ColsAtCompileTime), MaxRowsAtCompileTime = Tr ? int(traits::MaxRowsAtCompileTime) : int(traits::MaxRowsAtCompileTime), MaxColsAtCompileTime = Tr ? int(traits::MaxColsAtCompileTime) : int(traits::MaxColsAtCompileTime), Flags = Tr ? RowMajorBit : 0, CoeffReadCost = LhsCoeffReadCost + RhsCoeffReadCost + NumTraits::MulCost }; }; } // end namespace internal template class SparseDenseOuterProduct : public SparseMatrixBase > { public: typedef SparseMatrixBase Base; EIGEN_DENSE_PUBLIC_INTERFACE(SparseDenseOuterProduct) typedef internal::traits Traits; private: typedef typename Traits::LhsNested LhsNested; typedef typename Traits::RhsNested RhsNested; typedef typename Traits::_LhsNested _LhsNested; typedef typename Traits::_RhsNested _RhsNested; public: class InnerIterator; EIGEN_STRONG_INLINE SparseDenseOuterProduct(const Lhs& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs) { EIGEN_STATIC_ASSERT(!Tr,YOU_MADE_A_PROGRAMMING_MISTAKE); } EIGEN_STRONG_INLINE SparseDenseOuterProduct(const Rhs& rhs, const Lhs& lhs) : m_lhs(lhs), m_rhs(rhs) { EIGEN_STATIC_ASSERT(Tr,YOU_MADE_A_PROGRAMMING_MISTAKE); } EIGEN_STRONG_INLINE Index rows() const { return Tr ? m_rhs.rows() : m_lhs.rows(); } EIGEN_STRONG_INLINE Index cols() const { return Tr ? m_lhs.cols() : m_rhs.cols(); } EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; } EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; } protected: LhsNested m_lhs; RhsNested m_rhs; }; template class SparseDenseOuterProduct::InnerIterator : public _LhsNested::InnerIterator { typedef typename _LhsNested::InnerIterator Base; typedef typename SparseDenseOuterProduct::Index Index; public: EIGEN_STRONG_INLINE InnerIterator(const SparseDenseOuterProduct& prod, Index outer) : Base(prod.lhs(), 0), m_outer(outer), m_factor(get(prod.rhs(), outer, typename internal::traits::StorageKind() )) { } inline Index outer() const { return m_outer; } inline Index row() const { return Transpose ? m_outer : Base::index(); } inline Index col() const { return Transpose ? 
Base::index() : m_outer; } inline Scalar value() const { return Base::value() * m_factor; } protected: static Scalar get(const _RhsNested &rhs, Index outer, Dense = Dense()) { return rhs.coeff(outer); } static Scalar get(const _RhsNested &rhs, Index outer, Sparse = Sparse()) { typename Traits::_RhsNested::InnerIterator it(rhs, outer); if (it && it.index()==0) return it.value(); return Scalar(0); } Index m_outer; Scalar m_factor; }; namespace internal { template struct traits > : traits, Lhs, Rhs> > { typedef Dense StorageKind; typedef MatrixXpr XprKind; }; template struct sparse_time_dense_product_impl; template struct sparse_time_dense_product_impl { typedef typename internal::remove_all::type Lhs; typedef typename internal::remove_all::type Rhs; typedef typename internal::remove_all::type Res; typedef typename Lhs::Index Index; typedef typename Lhs::InnerIterator LhsInnerIterator; static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha) { for(Index c=0; c struct sparse_time_dense_product_impl { typedef typename internal::remove_all::type Lhs; typedef typename internal::remove_all::type Rhs; typedef typename internal::remove_all::type Res; typedef typename Lhs::InnerIterator LhsInnerIterator; typedef typename Lhs::Index Index; static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha) { for(Index c=0; c struct sparse_time_dense_product_impl { typedef typename internal::remove_all::type Lhs; typedef typename internal::remove_all::type Rhs; typedef typename internal::remove_all::type Res; typedef typename Lhs::InnerIterator LhsInnerIterator; typedef typename Lhs::Index Index; static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha) { for(Index j=0; j struct sparse_time_dense_product_impl { typedef typename internal::remove_all::type Lhs; typedef typename internal::remove_all::type Rhs; typedef typename internal::remove_all::type Res; typedef typename Lhs::InnerIterator LhsInnerIterator; typedef typename Lhs::Index Index; static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha) { for(Index j=0; j inline void sparse_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha) { sparse_time_dense_product_impl::run(lhs, rhs, res, alpha); } } // end namespace internal template class SparseTimeDenseProduct : public ProductBase, Lhs, Rhs> { public: EIGEN_PRODUCT_PUBLIC_INTERFACE(SparseTimeDenseProduct) SparseTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {} template void scaleAndAddTo(Dest& dest, const Scalar& alpha) const { internal::sparse_time_dense_product(m_lhs, m_rhs, dest, alpha); } private: SparseTimeDenseProduct& operator=(const SparseTimeDenseProduct&); }; // dense = dense * sparse namespace internal { template struct traits > : traits, Lhs, Rhs> > { typedef Dense StorageKind; }; } // end namespace internal template class DenseTimeSparseProduct : public ProductBase, Lhs, Rhs> { public: EIGEN_PRODUCT_PUBLIC_INTERFACE(DenseTimeSparseProduct) DenseTimeSparseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {} template void scaleAndAddTo(Dest& dest, const Scalar& alpha) const { Transpose lhs_t(m_lhs); Transpose rhs_t(m_rhs); Transpose dest_t(dest); internal::sparse_time_dense_product(rhs_t, lhs_t, dest_t, alpha); } private: DenseTimeSparseProduct& operator=(const 
DenseTimeSparseProduct&); }; } // end namespace Eigen #endif // EIGEN_SPARSEDENSEPRODUCT_H RcppEigen/inst/include/Eigen/src/StlSupport/0000755000176200001440000000000012375356050020511 5ustar liggesusersRcppEigen/inst/include/Eigen/src/StlSupport/details.h0000644000176200001440000000531312253717461022313 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud // Copyright (C) 2009 Hauke Heibel // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_STL_DETAILS_H #define EIGEN_STL_DETAILS_H #ifndef EIGEN_ALIGNED_ALLOCATOR #define EIGEN_ALIGNED_ALLOCATOR Eigen::aligned_allocator #endif namespace Eigen { // This one is needed to prevent reimplementing the whole std::vector. template class aligned_allocator_indirection : public EIGEN_ALIGNED_ALLOCATOR { public: typedef size_t size_type; typedef ptrdiff_t difference_type; typedef T* pointer; typedef const T* const_pointer; typedef T& reference; typedef const T& const_reference; typedef T value_type; template struct rebind { typedef aligned_allocator_indirection other; }; aligned_allocator_indirection() {} aligned_allocator_indirection(const aligned_allocator_indirection& ) : EIGEN_ALIGNED_ALLOCATOR() {} aligned_allocator_indirection(const EIGEN_ALIGNED_ALLOCATOR& ) {} template aligned_allocator_indirection(const aligned_allocator_indirection& ) {} template aligned_allocator_indirection(const EIGEN_ALIGNED_ALLOCATOR& ) {} ~aligned_allocator_indirection() {} }; #ifdef _MSC_VER // sometimes, MSVC detects, at compile time, that the argument x // in std::vector::resize(size_t s,T x) won't be aligned and generate an error // even if this function is never called. Whence this little wrapper. #define EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T) \ typename Eigen::internal::conditional< \ Eigen::internal::is_arithmetic::value, \ T, \ Eigen::internal::workaround_msvc_stl_support \ >::type namespace internal { template struct workaround_msvc_stl_support : public T { inline workaround_msvc_stl_support() : T() {} inline workaround_msvc_stl_support(const T& other) : T(other) {} inline operator T& () { return *static_cast(this); } inline operator const T& () const { return *static_cast(this); } template inline T& operator=(const OtherT& other) { T::operator=(other); return *this; } inline workaround_msvc_stl_support& operator=(const workaround_msvc_stl_support& other) { T::operator=(other); return *this; } }; } #else #define EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T) T #endif } #endif // EIGEN_STL_DETAILS_H RcppEigen/inst/include/Eigen/src/StlSupport/StdVector.h0000644000176200001440000001210512375356050022576 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud // Copyright (C) 2009 Hauke Heibel // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_STDVECTOR_H #define EIGEN_STDVECTOR_H #include "details.h" /** * This section contains a convenience MACRO which allows an easy specialization of * std::vector such that for data types with alignment issues the correct allocator * is used automatically. */ #define EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(...) 
\ namespace std \ { \ template<> \ class vector<__VA_ARGS__, std::allocator<__VA_ARGS__> > \ : public vector<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > \ { \ typedef vector<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > vector_base; \ public: \ typedef __VA_ARGS__ value_type; \ typedef vector_base::allocator_type allocator_type; \ typedef vector_base::size_type size_type; \ typedef vector_base::iterator iterator; \ explicit vector(const allocator_type& a = allocator_type()) : vector_base(a) {} \ template \ vector(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : vector_base(first, last, a) {} \ vector(const vector& c) : vector_base(c) {} \ explicit vector(size_type num, const value_type& val = value_type()) : vector_base(num, val) {} \ vector(iterator start, iterator end) : vector_base(start, end) {} \ vector& operator=(const vector& x) { \ vector_base::operator=(x); \ return *this; \ } \ }; \ } namespace std { #define EIGEN_STD_VECTOR_SPECIALIZATION_BODY \ public: \ typedef T value_type; \ typedef typename vector_base::allocator_type allocator_type; \ typedef typename vector_base::size_type size_type; \ typedef typename vector_base::iterator iterator; \ typedef typename vector_base::const_iterator const_iterator; \ explicit vector(const allocator_type& a = allocator_type()) : vector_base(a) {} \ template \ vector(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) \ : vector_base(first, last, a) {} \ vector(const vector& c) : vector_base(c) {} \ explicit vector(size_type num, const value_type& val = value_type()) : vector_base(num, val) {} \ vector(iterator start, iterator end) : vector_base(start, end) {} \ vector& operator=(const vector& x) { \ vector_base::operator=(x); \ return *this; \ } template class vector > : public vector > { typedef vector > vector_base; EIGEN_STD_VECTOR_SPECIALIZATION_BODY void resize(size_type new_size) { resize(new_size, T()); } #if defined(_VECTOR_) // workaround MSVC std::vector implementation void resize(size_type new_size, const value_type& x) { if (vector_base::size() < new_size) vector_base::_Insert_n(vector_base::end(), new_size - vector_base::size(), x); else if (new_size < vector_base::size()) vector_base::erase(vector_base::begin() + new_size, vector_base::end()); } void push_back(const value_type& x) { vector_base::push_back(x); } using vector_base::insert; iterator insert(const_iterator position, const value_type& x) { return vector_base::insert(position,x); } void insert(const_iterator position, size_type new_size, const value_type& x) { vector_base::insert(position, new_size, x); } #elif defined(_GLIBCXX_VECTOR) && (!(EIGEN_GNUC_AT_LEAST(4,1))) /* Note that before gcc-4.1 we already have: std::vector::resize(size_type,const T&). * However, this specialization is still needed to make the above EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION trick to work. */ void resize(size_type new_size, const value_type& x) { vector_base::resize(new_size,x); } #elif defined(_GLIBCXX_VECTOR) && EIGEN_GNUC_AT_LEAST(4,2) // workaround GCC std::vector implementation void resize(size_type new_size, const value_type& x) { if (new_size < vector_base::size()) vector_base::_M_erase_at_end(this->_M_impl._M_start + new_size); else vector_base::insert(vector_base::end(), new_size - vector_base::size(), x); } #else // either GCC 4.1 or non-GCC // default implementation which should always work. 
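    // Portable fallback: shrink by erasing the tail beyond new_size, or grow by inserting
    // copies of x at the end, mirroring std::vector::resize(size_type, const T&) while the
    // actual storage management stays with the aligned-allocator base vector.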
void resize(size_type new_size, const value_type& x) { if (new_size < vector_base::size()) vector_base::erase(vector_base::begin() + new_size, vector_base::end()); else if (new_size > vector_base::size()) vector_base::insert(vector_base::end(), new_size - vector_base::size(), x); } #endif }; } #endif // EIGEN_STDVECTOR_H RcppEigen/inst/include/Eigen/src/StlSupport/StdList.h0000644000176200001440000001063012375356050022250 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Hauke Heibel // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_STDLIST_H #define EIGEN_STDLIST_H #include "details.h" // Define the explicit instantiation (e.g. necessary for the Intel compiler) #if defined(__INTEL_COMPILER) || defined(__GNUC__) #define EIGEN_EXPLICIT_STL_LIST_INSTANTIATION(...) template class std::list<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> >; #else #define EIGEN_EXPLICIT_STL_LIST_INSTANTIATION(...) #endif /** * This section contains a convenience MACRO which allows an easy specialization of * std::list such that for data types with alignment issues the correct allocator * is used automatically. */ #define EIGEN_DEFINE_STL_LIST_SPECIALIZATION(...) \ EIGEN_EXPLICIT_STL_LIST_INSTANTIATION(__VA_ARGS__) \ namespace std \ { \ template \ class list<__VA_ARGS__, _Ay> \ : public list<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > \ { \ typedef list<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > list_base; \ public: \ typedef __VA_ARGS__ value_type; \ typedef typename list_base::allocator_type allocator_type; \ typedef typename list_base::size_type size_type; \ typedef typename list_base::iterator iterator; \ explicit list(const allocator_type& a = allocator_type()) : list_base(a) {} \ template \ list(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : list_base(first, last, a) {} \ list(const list& c) : list_base(c) {} \ explicit list(size_type num, const value_type& val = value_type()) : list_base(num, val) {} \ list(iterator start, iterator end) : list_base(start, end) {} \ list& operator=(const list& x) { \ list_base::operator=(x); \ return *this; \ } \ }; \ } // check whether we really need the std::vector specialization #if !(defined(_GLIBCXX_VECTOR) && (!EIGEN_GNUC_AT_LEAST(4,1))) /* Note that before gcc-4.1 we already have: std::list::resize(size_type,const T&). 
*/ namespace std { #define EIGEN_STD_LIST_SPECIALIZATION_BODY \ public: \ typedef T value_type; \ typedef typename list_base::allocator_type allocator_type; \ typedef typename list_base::size_type size_type; \ typedef typename list_base::iterator iterator; \ typedef typename list_base::const_iterator const_iterator; \ explicit list(const allocator_type& a = allocator_type()) : list_base(a) {} \ template \ list(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) \ : list_base(first, last, a) {} \ list(const list& c) : list_base(c) {} \ explicit list(size_type num, const value_type& val = value_type()) : list_base(num, val) {} \ list(iterator start, iterator end) : list_base(start, end) {} \ list& operator=(const list& x) { \ list_base::operator=(x); \ return *this; \ } template class list > : public list > { typedef list > list_base; EIGEN_STD_LIST_SPECIALIZATION_BODY void resize(size_type new_size) { resize(new_size, T()); } void resize(size_type new_size, const value_type& x) { if (list_base::size() < new_size) list_base::insert(list_base::end(), new_size - list_base::size(), x); else while (new_size < list_base::size()) list_base::pop_back(); } #if defined(_LIST_) // workaround MSVC std::list implementation void push_back(const value_type& x) { list_base::push_back(x); } using list_base::insert; iterator insert(const_iterator position, const value_type& x) { return list_base::insert(position,x); } void insert(const_iterator position, size_type new_size, const value_type& x) { list_base::insert(position, new_size, x); } #endif }; } #endif // check whether specialization is actually required #endif // EIGEN_STDLIST_H RcppEigen/inst/include/Eigen/src/StlSupport/StdDeque.h0000644000176200001440000001254212375356050022404 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud // Copyright (C) 2009 Hauke Heibel // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_STDDEQUE_H #define EIGEN_STDDEQUE_H #include "details.h" // Define the explicit instantiation (e.g. necessary for the Intel compiler) #if defined(__INTEL_COMPILER) || defined(__GNUC__) #define EIGEN_EXPLICIT_STL_DEQUE_INSTANTIATION(...) template class std::deque<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> >; #else #define EIGEN_EXPLICIT_STL_DEQUE_INSTANTIATION(...) #endif /** * This section contains a convenience MACRO which allows an easy specialization of * std::deque such that for data types with alignment issues the correct allocator * is used automatically. */ #define EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(...) 
\ EIGEN_EXPLICIT_STL_DEQUE_INSTANTIATION(__VA_ARGS__) \ namespace std \ { \ template \ class deque<__VA_ARGS__, _Ay> \ : public deque<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > \ { \ typedef deque<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > deque_base; \ public: \ typedef __VA_ARGS__ value_type; \ typedef typename deque_base::allocator_type allocator_type; \ typedef typename deque_base::size_type size_type; \ typedef typename deque_base::iterator iterator; \ explicit deque(const allocator_type& a = allocator_type()) : deque_base(a) {} \ template \ deque(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : deque_base(first, last, a) {} \ deque(const deque& c) : deque_base(c) {} \ explicit deque(size_type num, const value_type& val = value_type()) : deque_base(num, val) {} \ deque(iterator start, iterator end) : deque_base(start, end) {} \ deque& operator=(const deque& x) { \ deque_base::operator=(x); \ return *this; \ } \ }; \ } // check whether we really need the std::deque specialization #if !(defined(_GLIBCXX_DEQUE) && (!EIGEN_GNUC_AT_LEAST(4,1))) /* Note that before gcc-4.1 we already have: std::deque::resize(size_type,const T&). */ namespace std { #define EIGEN_STD_DEQUE_SPECIALIZATION_BODY \ public: \ typedef T value_type; \ typedef typename deque_base::allocator_type allocator_type; \ typedef typename deque_base::size_type size_type; \ typedef typename deque_base::iterator iterator; \ typedef typename deque_base::const_iterator const_iterator; \ explicit deque(const allocator_type& a = allocator_type()) : deque_base(a) {} \ template \ deque(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) \ : deque_base(first, last, a) {} \ deque(const deque& c) : deque_base(c) {} \ explicit deque(size_type num, const value_type& val = value_type()) : deque_base(num, val) {} \ deque(iterator start, iterator end) : deque_base(start, end) {} \ deque& operator=(const deque& x) { \ deque_base::operator=(x); \ return *this; \ } template class deque > : public deque > { typedef deque > deque_base; EIGEN_STD_DEQUE_SPECIALIZATION_BODY void resize(size_type new_size) { resize(new_size, T()); } #if defined(_DEQUE_) // workaround MSVC std::deque implementation void resize(size_type new_size, const value_type& x) { if (deque_base::size() < new_size) deque_base::_Insert_n(deque_base::end(), new_size - deque_base::size(), x); else if (new_size < deque_base::size()) deque_base::erase(deque_base::begin() + new_size, deque_base::end()); } void push_back(const value_type& x) { deque_base::push_back(x); } void push_front(const value_type& x) { deque_base::push_front(x); } using deque_base::insert; iterator insert(const_iterator position, const value_type& x) { return deque_base::insert(position,x); } void insert(const_iterator position, size_type new_size, const value_type& x) { deque_base::insert(position, new_size, x); } #elif defined(_GLIBCXX_DEQUE) && EIGEN_GNUC_AT_LEAST(4,2) // workaround GCC std::deque implementation void resize(size_type new_size, const value_type& x) { if (new_size < deque_base::size()) deque_base::_M_erase_at_end(this->_M_impl._M_start + new_size); else deque_base::insert(deque_base::end(), new_size - deque_base::size(), x); } #else // either GCC 4.1 or non-GCC // default implementation which should always work. 
void resize(size_type new_size, const value_type& x) { if (new_size < deque_base::size()) deque_base::erase(deque_base::begin() + new_size, deque_base::end()); else if (new_size > deque_base::size()) deque_base::insert(deque_base::end(), new_size - deque_base::size(), x); } #endif }; } #endif // check whether specialization is actually required #endif // EIGEN_STDDEQUE_H RcppEigen/inst/include/Eigen/src/Cholesky/0000755000176200001440000000000012647570720020137 5ustar liggesusersRcppEigen/inst/include/Eigen/src/Cholesky/LLT_MKL.h0000644000176200001440000000766412550462273021457 0ustar liggesusers/* Copyright (c) 2011, Intel Corporation. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************** * Content : Eigen bindings to Intel(R) MKL * LLt decomposition based on LAPACKE_?potrf function. ******************************************************************************** */ #ifndef EIGEN_LLT_MKL_H #define EIGEN_LLT_MKL_H #include "Eigen/src/Core/util/MKL_support.h" #include namespace Eigen { namespace internal { template struct mkl_llt; #define EIGEN_MKL_LLT(EIGTYPE, MKLTYPE, MKLPREFIX) \ template<> struct mkl_llt \ { \ template \ static inline typename MatrixType::Index potrf(MatrixType& m, char uplo) \ { \ lapack_int matrix_order; \ lapack_int size, lda, info, StorageOrder; \ EIGTYPE* a; \ eigen_assert(m.rows()==m.cols()); \ /* Set up parameters for ?potrf */ \ size = m.rows(); \ StorageOrder = MatrixType::Flags&RowMajorBit?RowMajor:ColMajor; \ matrix_order = StorageOrder==RowMajor ? LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \ a = &(m.coeffRef(0,0)); \ lda = m.outerStride(); \ \ info = LAPACKE_##MKLPREFIX##potrf( matrix_order, uplo, size, (MKLTYPE*)a, lda ); \ info = (info==0) ? -1 : info>0 ? 
info-1 : size; \ return info; \ } \ }; \ template<> struct llt_inplace \ { \ template \ static typename MatrixType::Index blocked(MatrixType& m) \ { \ return mkl_llt::potrf(m, 'L'); \ } \ template \ static typename MatrixType::Index rankUpdate(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) \ { return Eigen::internal::llt_rank_update_lower(mat, vec, sigma); } \ }; \ template<> struct llt_inplace \ { \ template \ static typename MatrixType::Index blocked(MatrixType& m) \ { \ return mkl_llt::potrf(m, 'U'); \ } \ template \ static typename MatrixType::Index rankUpdate(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) \ { \ Transpose matt(mat); \ return llt_inplace::rankUpdate(matt, vec.conjugate(), sigma); \ } \ }; EIGEN_MKL_LLT(double, double, d) EIGEN_MKL_LLT(float, float, s) EIGEN_MKL_LLT(dcomplex, MKL_Complex16, z) EIGEN_MKL_LLT(scomplex, MKL_Complex8, c) } // end namespace internal } // end namespace Eigen #endif // EIGEN_LLT_MKL_H RcppEigen/inst/include/Eigen/src/Cholesky/LLT.h0000644000176200001440000003751312647570720020754 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_LLT_H #define EIGEN_LLT_H namespace Eigen { namespace internal{ template struct LLT_Traits; } /** \ingroup Cholesky_Module * * \class LLT * * \brief Standard Cholesky decomposition (LL^T) of a matrix and associated features * * \param MatrixType the type of the matrix of which we are computing the LL^T Cholesky decomposition * \param UpLo the triangular part that will be used for the decompositon: Lower (default) or Upper. * The other triangular part won't be read. * * This class performs a LL^T Cholesky decomposition of a symmetric, positive definite * matrix A such that A = LL^* = U^*U, where L is lower triangular. * * While the Cholesky decomposition is particularly useful to solve selfadjoint problems like D^*D x = b, * for that purpose, we recommend the Cholesky decomposition without square root which is more stable * and even faster. Nevertheless, this standard Cholesky decomposition remains useful in many other * situations like generalised eigen problems with hermitian matrices. * * Remember that Cholesky decompositions are not rank-revealing. This LLT decomposition is only stable on positive definite matrices, * use LDLT instead for the semidefinite case. Also, do not use a Cholesky decomposition to determine whether a system of equations * has a solution. * * Example: \include LLT_example.cpp * Output: \verbinclude LLT_example.out * * \sa MatrixBase::llt(), class LDLT */ /* HEY THIS DOX IS DISABLED BECAUSE THERE's A BUG EITHER HERE OR IN LDLT ABOUT THAT (OR BOTH) * Note that during the decomposition, only the upper triangular part of A is considered. Therefore, * the strict lower part does not have to store correct values. 
*/ template class LLT { public: typedef _MatrixType MatrixType; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, Options = MatrixType::Options, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime }; typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef typename MatrixType::Index Index; enum { PacketSize = internal::packet_traits::size, AlignmentMask = int(PacketSize)-1, UpLo = _UpLo }; typedef internal::LLT_Traits Traits; /** * \brief Default Constructor. * * The default constructor is useful in cases in which the user intends to * perform decompositions via LLT::compute(const MatrixType&). */ LLT() : m_matrix(), m_isInitialized(false) {} /** \brief Default Constructor with memory preallocation * * Like the default constructor but with preallocation of the internal data * according to the specified problem \a size. * \sa LLT() */ LLT(Index size) : m_matrix(size, size), m_isInitialized(false) {} LLT(const MatrixType& matrix) : m_matrix(matrix.rows(), matrix.cols()), m_isInitialized(false) { compute(matrix); } /** \returns a view of the upper triangular matrix U */ inline typename Traits::MatrixU matrixU() const { eigen_assert(m_isInitialized && "LLT is not initialized."); return Traits::getU(m_matrix); } /** \returns a view of the lower triangular matrix L */ inline typename Traits::MatrixL matrixL() const { eigen_assert(m_isInitialized && "LLT is not initialized."); return Traits::getL(m_matrix); } /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. * * Since this LLT class assumes anyway that the matrix A is invertible, the solution * theoretically exists and is unique regardless of b. * * Example: \include LLT_solve.cpp * Output: \verbinclude LLT_solve.out * * \sa solveInPlace(), MatrixBase::llt() */ template inline const internal::solve_retval solve(const MatrixBase& b) const { eigen_assert(m_isInitialized && "LLT is not initialized."); eigen_assert(m_matrix.rows()==b.rows() && "LLT::solve(): invalid number of rows of the right hand side matrix b"); return internal::solve_retval(*this, b.derived()); } #ifdef EIGEN2_SUPPORT template bool solve(const MatrixBase& b, ResultType *result) const { *result = this->solve(b); return true; } bool isPositiveDefinite() const { return true; } #endif template void solveInPlace(MatrixBase &bAndX) const; LLT& compute(const MatrixType& matrix); /** \returns the LLT decomposition matrix * * TODO: document the storage layout */ inline const MatrixType& matrixLLT() const { eigen_assert(m_isInitialized && "LLT is not initialized."); return m_matrix; } MatrixType reconstructedMatrix() const; /** \brief Reports whether previous computation was successful. * * \returns \c Success if computation was succesful, * \c NumericalIssue if the matrix.appears to be negative. */ ComputationInfo info() const { eigen_assert(m_isInitialized && "LLT is not initialized."); return m_info; } inline Index rows() const { return m_matrix.rows(); } inline Index cols() const { return m_matrix.cols(); } template LLT rankUpdate(const VectorType& vec, const RealScalar& sigma = 1); protected: static void check_template_parameters() { EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); } /** \internal * Used to compute and store L * The strict upper part is not used and even not initialized. 
*/ MatrixType m_matrix; bool m_isInitialized; ComputationInfo m_info; }; namespace internal { template struct llt_inplace; template static typename MatrixType::Index llt_rank_update_lower(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) { using std::sqrt; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::Index Index; typedef typename MatrixType::ColXpr ColXpr; typedef typename internal::remove_all::type ColXprCleaned; typedef typename ColXprCleaned::SegmentReturnType ColXprSegment; typedef Matrix TempVectorType; typedef typename TempVectorType::SegmentReturnType TempVecSegment; Index n = mat.cols(); eigen_assert(mat.rows()==n && vec.size()==n); TempVectorType temp; if(sigma>0) { // This version is based on Givens rotations. // It is faster than the other one below, but only works for updates, // i.e., for sigma > 0 temp = sqrt(sigma) * vec; for(Index i=0; i g; g.makeGivens(mat(i,i), -temp(i), &mat(i,i)); Index rs = n-i-1; if(rs>0) { ColXprSegment x(mat.col(i).tail(rs)); TempVecSegment y(temp.tail(rs)); apply_rotation_in_the_plane(x, y, g); } } } else { temp = vec; RealScalar beta = 1; for(Index j=0; j struct llt_inplace { typedef typename NumTraits::Real RealScalar; template static typename MatrixType::Index unblocked(MatrixType& mat) { using std::sqrt; typedef typename MatrixType::Index Index; eigen_assert(mat.rows()==mat.cols()); const Index size = mat.rows(); for(Index k = 0; k < size; ++k) { Index rs = size-k-1; // remaining size Block A21(mat,k+1,k,rs,1); Block A10(mat,k,0,1,k); Block A20(mat,k+1,0,rs,k); RealScalar x = numext::real(mat.coeff(k,k)); if (k>0) x -= A10.squaredNorm(); if (x<=RealScalar(0)) return k; mat.coeffRef(k,k) = x = sqrt(x); if (k>0 && rs>0) A21.noalias() -= A20 * A10.adjoint(); if (rs>0) A21 /= x; } return -1; } template static typename MatrixType::Index blocked(MatrixType& m) { typedef typename MatrixType::Index Index; eigen_assert(m.rows()==m.cols()); Index size = m.rows(); if(size<32) return unblocked(m); Index blockSize = size/8; blockSize = (blockSize/16)*16; blockSize = (std::min)((std::max)(blockSize,Index(8)), Index(128)); for (Index k=0; k A11(m,k, k, bs,bs); Block A21(m,k+bs,k, rs,bs); Block A22(m,k+bs,k+bs,rs,rs); Index ret; if((ret=unblocked(A11))>=0) return k+ret; if(rs>0) A11.adjoint().template triangularView().template solveInPlace(A21); if(rs>0) A22.template selfadjointView().rankUpdate(A21,-1); // bottleneck } return -1; } template static typename MatrixType::Index rankUpdate(MatrixType& mat, const VectorType& vec, const RealScalar& sigma) { return Eigen::internal::llt_rank_update_lower(mat, vec, sigma); } }; template struct llt_inplace { typedef typename NumTraits::Real RealScalar; template static EIGEN_STRONG_INLINE typename MatrixType::Index unblocked(MatrixType& mat) { Transpose matt(mat); return llt_inplace::unblocked(matt); } template static EIGEN_STRONG_INLINE typename MatrixType::Index blocked(MatrixType& mat) { Transpose matt(mat); return llt_inplace::blocked(matt); } template static typename MatrixType::Index rankUpdate(MatrixType& mat, const VectorType& vec, const RealScalar& sigma) { Transpose matt(mat); return llt_inplace::rankUpdate(matt, vec.conjugate(), sigma); } }; template struct LLT_Traits { typedef const TriangularView MatrixL; typedef const TriangularView MatrixU; static inline MatrixL getL(const MatrixType& m) { return m; } static inline MatrixU getU(const MatrixType& m) { return m.adjoint(); } static bool 
inplace_decomposition(MatrixType& m) { return llt_inplace::blocked(m)==-1; } }; template struct LLT_Traits { typedef const TriangularView MatrixL; typedef const TriangularView MatrixU; static inline MatrixL getL(const MatrixType& m) { return m.adjoint(); } static inline MatrixU getU(const MatrixType& m) { return m; } static bool inplace_decomposition(MatrixType& m) { return llt_inplace::blocked(m)==-1; } }; } // end namespace internal /** Computes / recomputes the Cholesky decomposition A = LL^* = U^*U of \a matrix * * \returns a reference to *this * * Example: \include TutorialLinAlgComputeTwice.cpp * Output: \verbinclude TutorialLinAlgComputeTwice.out */ template LLT& LLT::compute(const MatrixType& a) { check_template_parameters(); eigen_assert(a.rows()==a.cols()); const Index size = a.rows(); m_matrix.resize(size, size); m_matrix = a; m_isInitialized = true; bool ok = Traits::inplace_decomposition(m_matrix); m_info = ok ? Success : NumericalIssue; return *this; } /** Performs a rank one update (or dowdate) of the current decomposition. * If A = LL^* before the rank one update, * then after it we have LL^* = A + sigma * v v^* where \a v must be a vector * of same dimension. */ template template LLT<_MatrixType,_UpLo> LLT<_MatrixType,_UpLo>::rankUpdate(const VectorType& v, const RealScalar& sigma) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorType); eigen_assert(v.size()==m_matrix.cols()); eigen_assert(m_isInitialized); if(internal::llt_inplace::rankUpdate(m_matrix,v,sigma)>=0) m_info = NumericalIssue; else m_info = Success; return *this; } namespace internal { template struct solve_retval, Rhs> : solve_retval_base, Rhs> { typedef LLT<_MatrixType,UpLo> LLTType; EIGEN_MAKE_SOLVE_HELPERS(LLTType,Rhs) template void evalTo(Dest& dst) const { dst = rhs(); dec().solveInPlace(dst); } }; } /** \internal use x = llt_object.solve(x); * * This is the \em in-place version of solve(). * * \param bAndX represents both the right-hand side matrix b and result x. * * \returns true always! If you need to check for existence of solutions, use another decomposition like LU, QR, or SVD. * * This version avoids a copy when the right hand side matrix b is not * needed anymore. * * \sa LLT::solve(), MatrixBase::llt() */ template template void LLT::solveInPlace(MatrixBase &bAndX) const { eigen_assert(m_isInitialized && "LLT is not initialized."); eigen_assert(m_matrix.rows()==bAndX.rows()); matrixL().solveInPlace(bAndX); matrixU().solveInPlace(bAndX); } /** \returns the matrix represented by the decomposition, * i.e., it returns the product: L L^*. * This function is provided for debug purpose. */ template MatrixType LLT::reconstructedMatrix() const { eigen_assert(m_isInitialized && "LLT is not initialized."); return matrixL() * matrixL().adjoint().toDenseMatrix(); } /** \cholesky_module * \returns the LLT decomposition of \c *this */ template inline const LLT::PlainObject> MatrixBase::llt() const { return LLT(derived()); } /** \cholesky_module * \returns the LLT decomposition of \c *this */ template inline const LLT::PlainObject, UpLo> SelfAdjointView::llt() const { return LLT(m_matrix); } } // end namespace Eigen #endif // EIGEN_LLT_H RcppEigen/inst/include/Eigen/src/Cholesky/LDLT.h0000644000176200001440000005223512647570720021056 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2011 Gael Guennebaud // Copyright (C) 2009 Keir Mierle // Copyright (C) 2009 Benoit Jacob // Copyright (C) 2011 Timothy E. 
Holy // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_LDLT_H #define EIGEN_LDLT_H namespace Eigen { namespace internal { template struct LDLT_Traits; // PositiveSemiDef means positive semi-definite and non-zero; same for NegativeSemiDef enum SignMatrix { PositiveSemiDef, NegativeSemiDef, ZeroSign, Indefinite }; } /** \ingroup Cholesky_Module * * \class LDLT * * \brief Robust Cholesky decomposition of a matrix with pivoting * * \param MatrixType the type of the matrix of which to compute the LDL^T Cholesky decomposition * \param UpLo the triangular part that will be used for the decompositon: Lower (default) or Upper. * The other triangular part won't be read. * * Perform a robust Cholesky decomposition of a positive semidefinite or negative semidefinite * matrix \f$ A \f$ such that \f$ A = P^TLDL^*P \f$, where P is a permutation matrix, L * is lower triangular with a unit diagonal and D is a diagonal matrix. * * The decomposition uses pivoting to ensure stability, so that L will have * zeros in the bottom right rank(A) - n submatrix. Avoiding the square root * on D also stabilizes the computation. * * Remember that Cholesky decompositions are not rank-revealing. Also, do not use a Cholesky * decomposition to determine whether a system of equations has a solution. * * \sa MatrixBase::ldlt(), class LLT */ template class LDLT { public: typedef _MatrixType MatrixType; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, Options = MatrixType::Options & ~RowMajorBit, // these are the options for the TmpMatrixType, we need a ColMajor matrix here! MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, UpLo = _UpLo }; typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef typename MatrixType::Index Index; typedef Matrix TmpMatrixType; typedef Transpositions TranspositionType; typedef PermutationMatrix PermutationType; typedef internal::LDLT_Traits Traits; /** \brief Default Constructor. * * The default constructor is useful in cases in which the user intends to * perform decompositions via LDLT::compute(const MatrixType&). */ LDLT() : m_matrix(), m_transpositions(), m_sign(internal::ZeroSign), m_isInitialized(false) {} /** \brief Default Constructor with memory preallocation * * Like the default constructor but with preallocation of the internal data * according to the specified problem \a size. * \sa LDLT() */ LDLT(Index size) : m_matrix(size, size), m_transpositions(size), m_temporary(size), m_sign(internal::ZeroSign), m_isInitialized(false) {} /** \brief Constructor with decomposition * * This calculates the decomposition for the input \a matrix. 
* \sa LDLT(Index size) */ LDLT(const MatrixType& matrix) : m_matrix(matrix.rows(), matrix.cols()), m_transpositions(matrix.rows()), m_temporary(matrix.rows()), m_sign(internal::ZeroSign), m_isInitialized(false) { compute(matrix); } /** Clear any existing decomposition * \sa rankUpdate(w,sigma) */ void setZero() { m_isInitialized = false; } /** \returns a view of the upper triangular matrix U */ inline typename Traits::MatrixU matrixU() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); return Traits::getU(m_matrix); } /** \returns a view of the lower triangular matrix L */ inline typename Traits::MatrixL matrixL() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); return Traits::getL(m_matrix); } /** \returns the permutation matrix P as a transposition sequence. */ inline const TranspositionType& transpositionsP() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); return m_transpositions; } /** \returns the coefficients of the diagonal matrix D */ inline Diagonal vectorD() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); return m_matrix.diagonal(); } /** \returns true if the matrix is positive (semidefinite) */ inline bool isPositive() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); return m_sign == internal::PositiveSemiDef || m_sign == internal::ZeroSign; } #ifdef EIGEN2_SUPPORT inline bool isPositiveDefinite() const { return isPositive(); } #endif /** \returns true if the matrix is negative (semidefinite) */ inline bool isNegative(void) const { eigen_assert(m_isInitialized && "LDLT is not initialized."); return m_sign == internal::NegativeSemiDef || m_sign == internal::ZeroSign; } /** \returns a solution x of \f$ A x = b \f$ using the current decomposition of A. * * This function also supports in-place solves using the syntax x = decompositionObject.solve(x) . * * \note_about_checking_solutions * * More precisely, this method solves \f$ A x = b \f$ using the decomposition \f$ A = P^T L D L^* P \f$ * by solving the systems \f$ P^T y_1 = b \f$, \f$ L y_2 = y_1 \f$, \f$ D y_3 = y_2 \f$, * \f$ L^* y_4 = y_3 \f$ and \f$ P x = y_4 \f$ in succession. If the matrix \f$ A \f$ is singular, then * \f$ D \f$ will also be singular (all the other matrices are invertible). In that case, the * least-square solution of \f$ D y_3 = y_2 \f$ is computed. This does not mean that this function * computes the least-square solution of \f$ A x = b \f$ is \f$ A \f$ is singular. 
* * \sa MatrixBase::ldlt() */ template inline const internal::solve_retval solve(const MatrixBase& b) const { eigen_assert(m_isInitialized && "LDLT is not initialized."); eigen_assert(m_matrix.rows()==b.rows() && "LDLT::solve(): invalid number of rows of the right hand side matrix b"); return internal::solve_retval(*this, b.derived()); } #ifdef EIGEN2_SUPPORT template bool solve(const MatrixBase& b, ResultType *result) const { *result = this->solve(b); return true; } #endif template bool solveInPlace(MatrixBase &bAndX) const; LDLT& compute(const MatrixType& matrix); template LDLT& rankUpdate(const MatrixBase& w, const RealScalar& alpha=1); /** \returns the internal LDLT decomposition matrix * * TODO: document the storage layout */ inline const MatrixType& matrixLDLT() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); return m_matrix; } MatrixType reconstructedMatrix() const; inline Index rows() const { return m_matrix.rows(); } inline Index cols() const { return m_matrix.cols(); } /** \brief Reports whether previous computation was successful. * * \returns \c Success if computation was succesful, * \c NumericalIssue if the matrix.appears to be negative. */ ComputationInfo info() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); return Success; } protected: static void check_template_parameters() { EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); } /** \internal * Used to compute and store the Cholesky decomposition A = L D L^* = U^* D U. * The strict upper part is used during the decomposition, the strict lower * part correspond to the coefficients of L (its diagonal is equal to 1 and * is not stored), and the diagonal entries correspond to D. */ MatrixType m_matrix; TranspositionType m_transpositions; TmpMatrixType m_temporary; internal::SignMatrix m_sign; bool m_isInitialized; }; namespace internal { template struct ldlt_inplace; template<> struct ldlt_inplace { template static bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, SignMatrix& sign) { using std::abs; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::Index Index; eigen_assert(mat.rows()==mat.cols()); const Index size = mat.rows(); if (size <= 1) { transpositions.setIdentity(); if (numext::real(mat.coeff(0,0)) > 0) sign = PositiveSemiDef; else if (numext::real(mat.coeff(0,0)) < 0) sign = NegativeSemiDef; else sign = ZeroSign; return true; } for (Index k = 0; k < size; ++k) { // Find largest diagonal element Index index_of_biggest_in_corner; mat.diagonal().tail(size-k).cwiseAbs().maxCoeff(&index_of_biggest_in_corner); index_of_biggest_in_corner += k; transpositions.coeffRef(k) = index_of_biggest_in_corner; if(k != index_of_biggest_in_corner) { // apply the transposition while taking care to consider only // the lower triangular part Index s = size-index_of_biggest_in_corner-1; // trailing size after the biggest element mat.row(k).head(k).swap(mat.row(index_of_biggest_in_corner).head(k)); mat.col(k).tail(s).swap(mat.col(index_of_biggest_in_corner).tail(s)); std::swap(mat.coeffRef(k,k),mat.coeffRef(index_of_biggest_in_corner,index_of_biggest_in_corner)); for(int i=k+1;i::IsComplex) mat.coeffRef(index_of_biggest_in_corner,k) = numext::conj(mat.coeff(index_of_biggest_in_corner,k)); } // partition the matrix: // A00 | - | - // lu = A10 | A11 | - // A20 | A21 | A22 Index rs = size - k - 1; Block A21(mat,k+1,k,rs,1); Block A10(mat,k,0,1,k); Block A20(mat,k+1,0,rs,k); if(k>0) { temp.head(k) = 
mat.diagonal().real().head(k).asDiagonal() * A10.adjoint(); mat.coeffRef(k,k) -= (A10 * temp.head(k)).value(); if(rs>0) A21.noalias() -= A20 * temp.head(k); } // In some previous versions of Eigen (e.g., 3.2.1), the scaling was omitted if the pivot // was smaller than the cutoff value. However, soince LDLT is not rank-revealing // we should only make sure we do not introduce INF or NaN values. // LAPACK also uses 0 as the cutoff value. RealScalar realAkk = numext::real(mat.coeffRef(k,k)); if((rs>0) && (abs(realAkk) > RealScalar(0))) A21 /= realAkk; if (sign == PositiveSemiDef) { if (realAkk < 0) sign = Indefinite; } else if (sign == NegativeSemiDef) { if (realAkk > 0) sign = Indefinite; } else if (sign == ZeroSign) { if (realAkk > 0) sign = PositiveSemiDef; else if (realAkk < 0) sign = NegativeSemiDef; } } return true; } // Reference for the algorithm: Davis and Hager, "Multiple Rank // Modifications of a Sparse Cholesky Factorization" (Algorithm 1) // Trivial rearrangements of their computations (Timothy E. Holy) // allow their algorithm to work for rank-1 updates even if the // original matrix is not of full rank. // Here only rank-1 updates are implemented, to reduce the // requirement for intermediate storage and improve accuracy template static bool updateInPlace(MatrixType& mat, MatrixBase& w, const typename MatrixType::RealScalar& sigma=1) { using numext::isfinite; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::Index Index; const Index size = mat.rows(); eigen_assert(mat.cols() == size && w.size()==size); RealScalar alpha = 1; // Apply the update for (Index j = 0; j < size; j++) { // Check for termination due to an original decomposition of low-rank if (!(isfinite)(alpha)) break; // Update the diagonal terms RealScalar dj = numext::real(mat.coeff(j,j)); Scalar wj = w.coeff(j); RealScalar swj2 = sigma*numext::abs2(wj); RealScalar gamma = dj*alpha + swj2; mat.coeffRef(j,j) += swj2/alpha; alpha += swj2/dj; // Update the terms of L Index rs = size-j-1; w.tail(rs) -= wj * mat.col(j).tail(rs); if(gamma != 0) mat.col(j).tail(rs) += (sigma*numext::conj(wj)/gamma)*w.tail(rs); } return true; } template static bool update(MatrixType& mat, const TranspositionType& transpositions, Workspace& tmp, const WType& w, const typename MatrixType::RealScalar& sigma=1) { // Apply the permutation to the input w tmp = transpositions * w; return ldlt_inplace::updateInPlace(mat,tmp,sigma); } }; template<> struct ldlt_inplace { template static EIGEN_STRONG_INLINE bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, SignMatrix& sign) { Transpose matt(mat); return ldlt_inplace::unblocked(matt, transpositions, temp, sign); } template static EIGEN_STRONG_INLINE bool update(MatrixType& mat, TranspositionType& transpositions, Workspace& tmp, WType& w, const typename MatrixType::RealScalar& sigma=1) { Transpose matt(mat); return ldlt_inplace::update(matt, transpositions, tmp, w.conjugate(), sigma); } }; template struct LDLT_Traits { typedef const TriangularView MatrixL; typedef const TriangularView MatrixU; static inline MatrixL getL(const MatrixType& m) { return m; } static inline MatrixU getU(const MatrixType& m) { return m.adjoint(); } }; template struct LDLT_Traits { typedef const TriangularView MatrixL; typedef const TriangularView MatrixU; static inline MatrixL getL(const MatrixType& m) { return m.adjoint(); } static inline MatrixU getU(const MatrixType& m) { return m; } }; } // end namespace internal 
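// A minimal usage sketch for the pivoted LDLT decomposition implemented above. The matrix A,
// right-hand side b and update vector w below are illustrative values only, and the sketch
// assumes the dense Eigen types (Eigen::MatrixXd, Eigen::VectorXd) from <Eigen/Dense> are
// available.
//
//   #include <Eigen/Dense>
//   #include <iostream>
//
//   int main() {
//     Eigen::MatrixXd A(3, 3);
//     A << 4, 1, 0,
//          1, 3, 1,
//          0, 1, 2;                               // symmetric positive definite
//     Eigen::VectorXd b(3);
//     b << 1, 2, 3;
//
//     Eigen::LDLT<Eigen::MatrixXd> ldlt(A);       // A = P^T L D L^* P
//     if (ldlt.info() != Eigen::Success) return 1;
//     Eigen::VectorXd x = ldlt.solve(b);          // solve A x = b
//     std::cout << "residual: " << (A * x - b).norm() << "\n";
//
//     Eigen::VectorXd w(3);
//     w << 1, 0, 1;
//     ldlt.rankUpdate(w, 1);                      // refactor A + 1 * w w^T in place
//     return 0;
//   }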
/** Compute / recompute the LDLT decomposition A = L D L^* = U^* D U of \a matrix */ template LDLT& LDLT::compute(const MatrixType& a) { check_template_parameters(); eigen_assert(a.rows()==a.cols()); const Index size = a.rows(); m_matrix = a; m_transpositions.resize(size); m_isInitialized = false; m_temporary.resize(size); m_sign = internal::ZeroSign; internal::ldlt_inplace::unblocked(m_matrix, m_transpositions, m_temporary, m_sign); m_isInitialized = true; return *this; } /** Update the LDLT decomposition: given A = L D L^T, efficiently compute the decomposition of A + sigma w w^T. * \param w a vector to be incorporated into the decomposition. * \param sigma a scalar, +1 for updates and -1 for "downdates," which correspond to removing previously-added column vectors. Optional; default value is +1. * \sa setZero() */ template template LDLT& LDLT::rankUpdate(const MatrixBase& w, const typename LDLT::RealScalar& sigma) { const Index size = w.rows(); if (m_isInitialized) { eigen_assert(m_matrix.rows()==size); } else { m_matrix.resize(size,size); m_matrix.setZero(); m_transpositions.resize(size); for (Index i = 0; i < size; i++) m_transpositions.coeffRef(i) = i; m_temporary.resize(size); m_sign = sigma>=0 ? internal::PositiveSemiDef : internal::NegativeSemiDef; m_isInitialized = true; } internal::ldlt_inplace::update(m_matrix, m_transpositions, m_temporary, w, sigma); return *this; } namespace internal { template struct solve_retval, Rhs> : solve_retval_base, Rhs> { typedef LDLT<_MatrixType,_UpLo> LDLTType; EIGEN_MAKE_SOLVE_HELPERS(LDLTType,Rhs) template void evalTo(Dest& dst) const { eigen_assert(rhs().rows() == dec().matrixLDLT().rows()); // dst = P b dst = dec().transpositionsP() * rhs(); // dst = L^-1 (P b) dec().matrixL().solveInPlace(dst); // dst = D^-1 (L^-1 P b) // more precisely, use pseudo-inverse of D (see bug 241) using std::abs; using std::max; typedef typename LDLTType::MatrixType MatrixType; typedef typename LDLTType::RealScalar RealScalar; const typename Diagonal::RealReturnType vectorD(dec().vectorD()); // In some previous versions, tolerance was set to the max of 1/highest and the maximal diagonal entry * epsilon // as motivated by LAPACK's xGELSS: // RealScalar tolerance = (max)(vectorD.array().abs().maxCoeff() *NumTraits::epsilon(),RealScalar(1) / NumTraits::highest()); // However, LDLT is not rank revealing, and so adjusting the tolerance wrt to the highest // diagonal element is not well justified and to numerical issues in some cases. // Moreover, Lapack's xSYTRS routines use 0 for the tolerance. RealScalar tolerance = RealScalar(1) / NumTraits::highest(); for (Index i = 0; i < vectorD.size(); ++i) { if(abs(vectorD(i)) > tolerance) dst.row(i) /= vectorD(i); else dst.row(i).setZero(); } // dst = L^-T (D^-1 L^-1 P b) dec().matrixU().solveInPlace(dst); // dst = P^-1 (L^-T D^-1 L^-1 P b) = A^-1 b dst = dec().transpositionsP().transpose() * dst; } }; } /** \internal use x = ldlt_object.solve(x); * * This is the \em in-place version of solve(). * * \param bAndX represents both the right-hand side matrix b and result x. * * \returns true always! If you need to check for existence of solutions, use another decomposition like LU, QR, or SVD. * * This version avoids a copy when the right hand side matrix b is not * needed anymore. 
* * \sa LDLT::solve(), MatrixBase::ldlt() */ template template bool LDLT::solveInPlace(MatrixBase &bAndX) const { eigen_assert(m_isInitialized && "LDLT is not initialized."); eigen_assert(m_matrix.rows() == bAndX.rows()); bAndX = this->solve(bAndX); return true; } /** \returns the matrix represented by the decomposition, * i.e., it returns the product: P^T L D L^* P. * This function is provided for debug purpose. */ template MatrixType LDLT::reconstructedMatrix() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); const Index size = m_matrix.rows(); MatrixType res(size,size); // P res.setIdentity(); res = transpositionsP() * res; // L^* P res = matrixU() * res; // D(L^*P) res = vectorD().real().asDiagonal() * res; // L(DL^*P) res = matrixL() * res; // P^T (LDL^*P) res = transpositionsP().transpose() * res; return res; } /** \cholesky_module * \returns the Cholesky decomposition with full pivoting without square root of \c *this */ template inline const LDLT::PlainObject, UpLo> SelfAdjointView::ldlt() const { return LDLT(m_matrix); } /** \cholesky_module * \returns the Cholesky decomposition with full pivoting without square root of \c *this */ template inline const LDLT::PlainObject> MatrixBase::ldlt() const { return LDLT(derived()); } } // end namespace Eigen #endif // EIGEN_LDLT_H RcppEigen/inst/include/Eigen/src/MetisSupport/0000755000176200001440000000000012253717461021032 5ustar liggesusersRcppEigen/inst/include/Eigen/src/MetisSupport/MetisSupport.h0000644000176200001440000001050012253717461023655 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Désiré Nuentsa-Wakam // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef METIS_SUPPORT_H #define METIS_SUPPORT_H namespace Eigen { /** * Get the fill-reducing ordering from the METIS package * * If A is the original matrix and Ap is the permuted matrix, * the fill-reducing permutation is defined as follows : * Row (column) i of A is the matperm(i) row (column) of Ap. 
* WARNING: As computed by METIS, this corresponds to the vector iperm (instead of perm) */ template class MetisOrdering { public: typedef PermutationMatrix PermutationType; typedef Matrix IndexVector; template void get_symmetrized_graph(const MatrixType& A) { Index m = A.cols(); eigen_assert((A.rows() == A.cols()) && "ONLY FOR SQUARED MATRICES"); // Get the transpose of the input matrix MatrixType At = A.transpose(); // Get the number of nonzeros elements in each row/col of At+A Index TotNz = 0; IndexVector visited(m); visited.setConstant(-1); for (int j = 0; j < m; j++) { // Compute the union structure of of A(j,:) and At(j,:) visited(j) = j; // Do not include the diagonal element // Get the nonzeros in row/column j of A for (typename MatrixType::InnerIterator it(A, j); it; ++it) { Index idx = it.index(); // Get the row index (for column major) or column index (for row major) if (visited(idx) != j ) { visited(idx) = j; ++TotNz; } } //Get the nonzeros in row/column j of At for (typename MatrixType::InnerIterator it(At, j); it; ++it) { Index idx = it.index(); if(visited(idx) != j) { visited(idx) = j; ++TotNz; } } } // Reserve place for A + At m_indexPtr.resize(m+1); m_innerIndices.resize(TotNz); // Now compute the real adjacency list of each column/row visited.setConstant(-1); Index CurNz = 0; for (int j = 0; j < m; j++) { m_indexPtr(j) = CurNz; visited(j) = j; // Do not include the diagonal element // Add the pattern of row/column j of A to A+At for (typename MatrixType::InnerIterator it(A,j); it; ++it) { Index idx = it.index(); // Get the row index (for column major) or column index (for row major) if (visited(idx) != j ) { visited(idx) = j; m_innerIndices(CurNz) = idx; CurNz++; } } //Add the pattern of row/column j of At to A+At for (typename MatrixType::InnerIterator it(At, j); it; ++it) { Index idx = it.index(); if(visited(idx) != j) { visited(idx) = j; m_innerIndices(CurNz) = idx; ++CurNz; } } } m_indexPtr(m) = CurNz; } template void operator() (const MatrixType& A, PermutationType& matperm) { Index m = A.cols(); IndexVector perm(m),iperm(m); // First, symmetrize the matrix graph. get_symmetrized_graph(A); int output_error; // Call the fill-reducing routine from METIS output_error = METIS_NodeND(&m, m_indexPtr.data(), m_innerIndices.data(), NULL, NULL, perm.data(), iperm.data()); if(output_error != METIS_OK) { //FIXME The ordering interface should define a class of possible errors std::cerr << "ERROR WHILE CALLING THE METIS PACKAGE \n"; return; } // Get the fill-reducing permutation //NOTE: If Ap is the permuted matrix then perm and iperm vectors are defined as follows // Row (column) i of Ap is the perm(i) row(column) of A, and row (column) i of A is the iperm(i) row(column) of Ap matperm.resize(m); for (int j = 0; j < m; j++) matperm.indices()(iperm(j)) = j; } protected: IndexVector m_indexPtr; // Pointer to the adjacenccy list of each row/column IndexVector m_innerIndices; // Adjacency list }; }// end namespace eigen #endif RcppEigen/inst/include/Eigen/src/SparseCholesky/0000755000176200001440000000000012375356050021311 5ustar liggesusersRcppEigen/inst/include/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h0000644000176200001440000001514612253717461026304 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2012 Gael Guennebaud /* NOTE: thes functions vave been adapted from the LDL library: LDL Copyright (c) 2005 by Timothy A. Davis. All Rights Reserved. 
LDL License: Your use or distribution of LDL or any modified version of LDL implies that you agree to this License. This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Permission is hereby granted to use or copy this program under the terms of the GNU LGPL, provided that the Copyright, this License, and the Availability of the original version is retained on all copies. User documentation of any code that uses this code or any modified version of this code must cite the Copyright, this License, the Availability note, and "Used by permission." Permission to modify the code and to distribute modified code is granted, provided the Copyright, this License, and the Availability note are retained, and a notice that the code was modified is included. */ #include "../Core/util/NonMPL2.h" #ifndef EIGEN_SIMPLICIAL_CHOLESKY_IMPL_H #define EIGEN_SIMPLICIAL_CHOLESKY_IMPL_H namespace Eigen { template void SimplicialCholeskyBase::analyzePattern_preordered(const CholMatrixType& ap, bool doLDLT) { const Index size = ap.rows(); m_matrix.resize(size, size); m_parent.resize(size); m_nonZerosPerCol.resize(size); ei_declare_aligned_stack_constructed_variable(Index, tags, size, 0); for(Index k = 0; k < size; ++k) { /* L(k,:) pattern: all nodes reachable in etree from nz in A(0:k-1,k) */ m_parent[k] = -1; /* parent of k is not yet known */ tags[k] = k; /* mark node k as visited */ m_nonZerosPerCol[k] = 0; /* count of nonzeros in column k of L */ for(typename CholMatrixType::InnerIterator it(ap,k); it; ++it) { Index i = it.index(); if(i < k) { /* follow path from i to root of etree, stop at flagged node */ for(; tags[i] != k; i = m_parent[i]) { /* find parent of i if not yet determined */ if (m_parent[i] == -1) m_parent[i] = k; m_nonZerosPerCol[i]++; /* L (k,i) is nonzero */ tags[i] = k; /* mark i as visited */ } } } } /* construct Lp index array from m_nonZerosPerCol column counts */ Index* Lp = m_matrix.outerIndexPtr(); Lp[0] = 0; for(Index k = 0; k < size; ++k) Lp[k+1] = Lp[k] + m_nonZerosPerCol[k] + (doLDLT ? 0 : 1); m_matrix.resizeNonZeros(Lp[size]); m_isInitialized = true; m_info = Success; m_analysisIsOk = true; m_factorizationIsOk = false; } template template void SimplicialCholeskyBase::factorize_preordered(const CholMatrixType& ap) { using std::sqrt; eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); eigen_assert(ap.rows()==ap.cols()); const Index size = ap.rows(); eigen_assert(m_parent.size()==size); eigen_assert(m_nonZerosPerCol.size()==size); const Index* Lp = m_matrix.outerIndexPtr(); Index* Li = m_matrix.innerIndexPtr(); Scalar* Lx = m_matrix.valuePtr(); ei_declare_aligned_stack_constructed_variable(Scalar, y, size, 0); ei_declare_aligned_stack_constructed_variable(Index, pattern, size, 0); ei_declare_aligned_stack_constructed_variable(Index, tags, size, 0); bool ok = true; m_diag.resize(DoLDLT ? 
size : 0); for(Index k = 0; k < size; ++k) { // compute nonzero pattern of kth row of L, in topological order y[k] = 0.0; // Y(0:k) is now all zero Index top = size; // stack for pattern is empty tags[k] = k; // mark node k as visited m_nonZerosPerCol[k] = 0; // count of nonzeros in column k of L for(typename MatrixType::InnerIterator it(ap,k); it; ++it) { Index i = it.index(); if(i <= k) { y[i] += numext::conj(it.value()); /* scatter A(i,k) into Y (sum duplicates) */ Index len; for(len = 0; tags[i] != k; i = m_parent[i]) { pattern[len++] = i; /* L(k,i) is nonzero */ tags[i] = k; /* mark i as visited */ } while(len > 0) pattern[--top] = pattern[--len]; } } /* compute numerical values kth row of L (a sparse triangular solve) */ RealScalar d = numext::real(y[k]) * m_shiftScale + m_shiftOffset; // get D(k,k), apply the shift function, and clear Y(k) y[k] = 0.0; for(; top < size; ++top) { Index i = pattern[top]; /* pattern[top:n-1] is pattern of L(:,k) */ Scalar yi = y[i]; /* get and clear Y(i) */ y[i] = 0.0; /* the nonzero entry L(k,i) */ Scalar l_ki; if(DoLDLT) l_ki = yi / m_diag[i]; else yi = l_ki = yi / Lx[Lp[i]]; Index p2 = Lp[i] + m_nonZerosPerCol[i]; Index p; for(p = Lp[i] + (DoLDLT ? 0 : 1); p < p2; ++p) y[Li[p]] -= numext::conj(Lx[p]) * yi; d -= numext::real(l_ki * numext::conj(yi)); Li[p] = k; /* store L(k,i) in column form of L */ Lx[p] = l_ki; ++m_nonZerosPerCol[i]; /* increment count of nonzeros in col i */ } if(DoLDLT) { m_diag[k] = d; if(d == RealScalar(0)) { ok = false; /* failure, D(k,k) is zero */ break; } } else { Index p = Lp[k] + m_nonZerosPerCol[k]++; Li[p] = k ; /* store L(k,k) = sqrt (d) in column k */ if(d <= RealScalar(0)) { ok = false; /* failure, matrix is not positive definite */ break; } Lx[p] = sqrt(d) ; } } m_info = ok ? Success : NumericalIssue; m_factorizationIsOk = true; } } // end namespace Eigen #endif // EIGEN_SIMPLICIAL_CHOLESKY_IMPL_H RcppEigen/inst/include/Eigen/src/SparseCholesky/SimplicialCholesky.h0000644000176200001440000005556212375356050025267 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2012 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SIMPLICIAL_CHOLESKY_H #define EIGEN_SIMPLICIAL_CHOLESKY_H namespace Eigen { enum SimplicialCholeskyMode { SimplicialCholeskyLLT, SimplicialCholeskyLDLT }; /** \ingroup SparseCholesky_Module * \brief A direct sparse Cholesky factorizations * * These classes provide LL^T and LDL^T Cholesky factorizations of sparse matrices that are * selfadjoint and positive definite. The factorization allows for solving A.X = B where * X and B can be either dense or sparse. * * In order to reduce the fill-in, a symmetric permutation P is applied prior to the factorization * such that the factorized matrix is P A P^-1. * * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower * or Upper. Default is Lower. 
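 *
 * A minimal usage sketch (illustrative only; the filled matrix \c A and right-hand side \c b are assumed,
 * they are not defined in this documentation):
 * \code
 * Eigen::SparseMatrix<double> A;   // selfadjoint, positive definite, already filled
 * Eigen::VectorXd b;               // right-hand side, already filled
 * Eigen::SimplicialLDLT<Eigen::SparseMatrix<double> > solver;
 * solver.compute(A);               // analyzePattern() followed by factorize()
 * Eigen::VectorXd x;
 * if (solver.info() == Eigen::Success)
 *   x = solver.solve(b);           // solves A x = b
 * \endcode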
* */ template class SimplicialCholeskyBase : internal::noncopyable { public: typedef typename internal::traits::MatrixType MatrixType; typedef typename internal::traits::OrderingType OrderingType; enum { UpLo = internal::traits::UpLo }; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::Index Index; typedef SparseMatrix CholMatrixType; typedef Matrix VectorType; public: /** Default constructor */ SimplicialCholeskyBase() : m_info(Success), m_isInitialized(false), m_shiftOffset(0), m_shiftScale(1) {} SimplicialCholeskyBase(const MatrixType& matrix) : m_info(Success), m_isInitialized(false), m_shiftOffset(0), m_shiftScale(1) { derived().compute(matrix); } ~SimplicialCholeskyBase() { } Derived& derived() { return *static_cast(this); } const Derived& derived() const { return *static_cast(this); } inline Index cols() const { return m_matrix.cols(); } inline Index rows() const { return m_matrix.rows(); } /** \brief Reports whether previous computation was successful. * * \returns \c Success if computation was succesful, * \c NumericalIssue if the matrix.appears to be negative. */ ComputationInfo info() const { eigen_assert(m_isInitialized && "Decomposition is not initialized."); return m_info; } /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. * * \sa compute() */ template inline const internal::solve_retval solve(const MatrixBase& b) const { eigen_assert(m_isInitialized && "Simplicial LLT or LDLT is not initialized."); eigen_assert(rows()==b.rows() && "SimplicialCholeskyBase::solve(): invalid number of rows of the right hand side matrix b"); return internal::solve_retval(*this, b.derived()); } /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. * * \sa compute() */ template inline const internal::sparse_solve_retval solve(const SparseMatrixBase& b) const { eigen_assert(m_isInitialized && "Simplicial LLT or LDLT is not initialized."); eigen_assert(rows()==b.rows() && "SimplicialCholesky::solve(): invalid number of rows of the right hand side matrix b"); return internal::sparse_solve_retval(*this, b.derived()); } /** \returns the permutation P * \sa permutationPinv() */ const PermutationMatrix& permutationP() const { return m_P; } /** \returns the inverse P^-1 of the permutation P * \sa permutationP() */ const PermutationMatrix& permutationPinv() const { return m_Pinv; } /** Sets the shift parameters that will be used to adjust the diagonal coefficients during the numerical factorization. * * During the numerical factorization, the diagonal coefficients are transformed by the following linear model:\n * \c d_ii = \a offset + \a scale * \c d_ii * * The default is the identity transformation with \a offset=0, and \a scale=1. * * \returns a reference to \c *this. 
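 *
 * For instance, a small diagonal shift can serve as a sketch of Tikhonov-like regularization
 * (the sparse selfadjoint matrix \c A is assumed, not defined here):
 * \code
 * Eigen::SimplicialLDLT<Eigen::SparseMatrix<double> > ldlt;
 * ldlt.setShift(1.0e-8);   // d_ii = 1e-8 + 1.0 * d_ii during the numerical factorization
 * ldlt.compute(A);
 * \endcode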
*/ Derived& setShift(const RealScalar& offset, const RealScalar& scale = 1) { m_shiftOffset = offset; m_shiftScale = scale; return derived(); } #ifndef EIGEN_PARSED_BY_DOXYGEN /** \internal */ template void dumpMemory(Stream& s) { int total = 0; s << " L: " << ((total+=(m_matrix.cols()+1) * sizeof(int) + m_matrix.nonZeros()*(sizeof(int)+sizeof(Scalar))) >> 20) << "Mb" << "\n"; s << " diag: " << ((total+=m_diag.size() * sizeof(Scalar)) >> 20) << "Mb" << "\n"; s << " tree: " << ((total+=m_parent.size() * sizeof(int)) >> 20) << "Mb" << "\n"; s << " nonzeros: " << ((total+=m_nonZerosPerCol.size() * sizeof(int)) >> 20) << "Mb" << "\n"; s << " perm: " << ((total+=m_P.size() * sizeof(int)) >> 20) << "Mb" << "\n"; s << " perm^-1: " << ((total+=m_Pinv.size() * sizeof(int)) >> 20) << "Mb" << "\n"; s << " TOTAL: " << (total>> 20) << "Mb" << "\n"; } /** \internal */ template void _solve(const MatrixBase &b, MatrixBase &dest) const { eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()"); eigen_assert(m_matrix.rows()==b.rows()); if(m_info!=Success) return; if(m_P.size()>0) dest = m_P * b; else dest = b; if(m_matrix.nonZeros()>0) // otherwise L==I derived().matrixL().solveInPlace(dest); if(m_diag.size()>0) dest = m_diag.asDiagonal().inverse() * dest; if (m_matrix.nonZeros()>0) // otherwise U==I derived().matrixU().solveInPlace(dest); if(m_P.size()>0) dest = m_Pinv * dest; } #endif // EIGEN_PARSED_BY_DOXYGEN protected: /** Computes the sparse Cholesky decomposition of \a matrix */ template void compute(const MatrixType& matrix) { eigen_assert(matrix.rows()==matrix.cols()); Index size = matrix.cols(); CholMatrixType ap(size,size); ordering(matrix, ap); analyzePattern_preordered(ap, DoLDLT); factorize_preordered(ap); } template void factorize(const MatrixType& a) { eigen_assert(a.rows()==a.cols()); int size = a.cols(); CholMatrixType ap(size,size); ap.template selfadjointView() = a.template selfadjointView().twistedBy(m_P); factorize_preordered(ap); } template void factorize_preordered(const CholMatrixType& a); void analyzePattern(const MatrixType& a, bool doLDLT) { eigen_assert(a.rows()==a.cols()); int size = a.cols(); CholMatrixType ap(size,size); ordering(a, ap); analyzePattern_preordered(ap,doLDLT); } void analyzePattern_preordered(const CholMatrixType& a, bool doLDLT); void ordering(const MatrixType& a, CholMatrixType& ap); /** keeps off-diagonal entries; drops diagonal entries */ struct keep_diag { inline bool operator() (const Index& row, const Index& col, const Scalar&) const { return row!=col; } }; mutable ComputationInfo m_info; bool m_isInitialized; bool m_factorizationIsOk; bool m_analysisIsOk; CholMatrixType m_matrix; VectorType m_diag; // the diagonal coefficients (LDLT mode) VectorXi m_parent; // elimination tree VectorXi m_nonZerosPerCol; PermutationMatrix m_P; // the permutation PermutationMatrix m_Pinv; // the inverse permutation RealScalar m_shiftOffset; RealScalar m_shiftScale; }; template > class SimplicialLLT; template > class SimplicialLDLT; template > class SimplicialCholesky; namespace internal { template struct traits > { typedef _MatrixType MatrixType; typedef _Ordering OrderingType; enum { UpLo = _UpLo }; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Index Index; typedef SparseMatrix CholMatrixType; typedef SparseTriangularView MatrixL; typedef SparseTriangularView MatrixU; static inline MatrixL getL(const MatrixType& m) { return m; } static inline 
MatrixU getU(const MatrixType& m) { return m.adjoint(); } }; template struct traits > { typedef _MatrixType MatrixType; typedef _Ordering OrderingType; enum { UpLo = _UpLo }; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Index Index; typedef SparseMatrix CholMatrixType; typedef SparseTriangularView MatrixL; typedef SparseTriangularView MatrixU; static inline MatrixL getL(const MatrixType& m) { return m; } static inline MatrixU getU(const MatrixType& m) { return m.adjoint(); } }; template struct traits > { typedef _MatrixType MatrixType; typedef _Ordering OrderingType; enum { UpLo = _UpLo }; }; } /** \ingroup SparseCholesky_Module * \class SimplicialLLT * \brief A direct sparse LLT Cholesky factorizations * * This class provides a LL^T Cholesky factorizations of sparse matrices that are * selfadjoint and positive definite. The factorization allows for solving A.X = B where * X and B can be either dense or sparse. * * In order to reduce the fill-in, a symmetric permutation P is applied prior to the factorization * such that the factorized matrix is P A P^-1. * * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower * or Upper. Default is Lower. * \tparam _Ordering The ordering method to use, either AMDOrdering<> or NaturalOrdering<>. Default is AMDOrdering<> * * \sa class SimplicialLDLT, class AMDOrdering, class NaturalOrdering */ template class SimplicialLLT : public SimplicialCholeskyBase > { public: typedef _MatrixType MatrixType; enum { UpLo = _UpLo }; typedef SimplicialCholeskyBase Base; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::Index Index; typedef SparseMatrix CholMatrixType; typedef Matrix VectorType; typedef internal::traits Traits; typedef typename Traits::MatrixL MatrixL; typedef typename Traits::MatrixU MatrixU; public: /** Default constructor */ SimplicialLLT() : Base() {} /** Constructs and performs the LLT factorization of \a matrix */ SimplicialLLT(const MatrixType& matrix) : Base(matrix) {} /** \returns an expression of the factor L */ inline const MatrixL matrixL() const { eigen_assert(Base::m_factorizationIsOk && "Simplicial LLT not factorized"); return Traits::getL(Base::m_matrix); } /** \returns an expression of the factor U (= L^*) */ inline const MatrixU matrixU() const { eigen_assert(Base::m_factorizationIsOk && "Simplicial LLT not factorized"); return Traits::getU(Base::m_matrix); } /** Computes the sparse Cholesky decomposition of \a matrix */ SimplicialLLT& compute(const MatrixType& matrix) { Base::template compute(matrix); return *this; } /** Performs a symbolic decomposition on the sparcity of \a matrix. * * This function is particularly useful when solving for several problems having the same structure. * * \sa factorize() */ void analyzePattern(const MatrixType& a) { Base::analyzePattern(a, false); } /** Performs a numeric decomposition of \a matrix * * The given matrix must has the same sparcity than the matrix on which the symbolic decomposition has been performed. 
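 *
 * A sketch of reusing the symbolic analysis (the matrices \c A1 and \c A2 are illustrative names,
 * assumed to share the same sparsity pattern):
 * \code
 * Eigen::SimplicialLLT<Eigen::SparseMatrix<double> > llt;
 * llt.analyzePattern(A1);   // symbolic step, done once
 * llt.factorize(A1);        // numeric step for the first set of values
 * llt.factorize(A2);        // same pattern, new values: reuses the symbolic analysis
 * \endcode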
* * \sa analyzePattern() */ void factorize(const MatrixType& a) { Base::template factorize(a); } /** \returns the determinant of the underlying matrix from the current factorization */ Scalar determinant() const { Scalar detL = Base::m_matrix.diagonal().prod(); return numext::abs2(detL); } }; /** \ingroup SparseCholesky_Module * \class SimplicialLDLT * \brief A direct sparse LDLT Cholesky factorizations without square root. * * This class provides a LDL^T Cholesky factorizations without square root of sparse matrices that are * selfadjoint and positive definite. The factorization allows for solving A.X = B where * X and B can be either dense or sparse. * * In order to reduce the fill-in, a symmetric permutation P is applied prior to the factorization * such that the factorized matrix is P A P^-1. * * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower * or Upper. Default is Lower. * \tparam _Ordering The ordering method to use, either AMDOrdering<> or NaturalOrdering<>. Default is AMDOrdering<> * * \sa class SimplicialLLT, class AMDOrdering, class NaturalOrdering */ template class SimplicialLDLT : public SimplicialCholeskyBase > { public: typedef _MatrixType MatrixType; enum { UpLo = _UpLo }; typedef SimplicialCholeskyBase Base; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::Index Index; typedef SparseMatrix CholMatrixType; typedef Matrix VectorType; typedef internal::traits Traits; typedef typename Traits::MatrixL MatrixL; typedef typename Traits::MatrixU MatrixU; public: /** Default constructor */ SimplicialLDLT() : Base() {} /** Constructs and performs the LLT factorization of \a matrix */ SimplicialLDLT(const MatrixType& matrix) : Base(matrix) {} /** \returns a vector expression of the diagonal D */ inline const VectorType vectorD() const { eigen_assert(Base::m_factorizationIsOk && "Simplicial LDLT not factorized"); return Base::m_diag; } /** \returns an expression of the factor L */ inline const MatrixL matrixL() const { eigen_assert(Base::m_factorizationIsOk && "Simplicial LDLT not factorized"); return Traits::getL(Base::m_matrix); } /** \returns an expression of the factor U (= L^*) */ inline const MatrixU matrixU() const { eigen_assert(Base::m_factorizationIsOk && "Simplicial LDLT not factorized"); return Traits::getU(Base::m_matrix); } /** Computes the sparse Cholesky decomposition of \a matrix */ SimplicialLDLT& compute(const MatrixType& matrix) { Base::template compute(matrix); return *this; } /** Performs a symbolic decomposition on the sparcity of \a matrix. * * This function is particularly useful when solving for several problems having the same structure. * * \sa factorize() */ void analyzePattern(const MatrixType& a) { Base::analyzePattern(a, true); } /** Performs a numeric decomposition of \a matrix * * The given matrix must has the same sparcity than the matrix on which the symbolic decomposition has been performed. 
* * \sa analyzePattern() */ void factorize(const MatrixType& a) { Base::template factorize(a); } /** \returns the determinant of the underlying matrix from the current factorization */ Scalar determinant() const { return Base::m_diag.prod(); } }; /** \deprecated use SimplicialLDLT or class SimplicialLLT * \ingroup SparseCholesky_Module * \class SimplicialCholesky * * \sa class SimplicialLDLT, class SimplicialLLT */ template class SimplicialCholesky : public SimplicialCholeskyBase > { public: typedef _MatrixType MatrixType; enum { UpLo = _UpLo }; typedef SimplicialCholeskyBase Base; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::Index Index; typedef SparseMatrix CholMatrixType; typedef Matrix VectorType; typedef internal::traits Traits; typedef internal::traits > LDLTTraits; typedef internal::traits > LLTTraits; public: SimplicialCholesky() : Base(), m_LDLT(true) {} SimplicialCholesky(const MatrixType& matrix) : Base(), m_LDLT(true) { compute(matrix); } SimplicialCholesky& setMode(SimplicialCholeskyMode mode) { switch(mode) { case SimplicialCholeskyLLT: m_LDLT = false; break; case SimplicialCholeskyLDLT: m_LDLT = true; break; default: break; } return *this; } inline const VectorType vectorD() const { eigen_assert(Base::m_factorizationIsOk && "Simplicial Cholesky not factorized"); return Base::m_diag; } inline const CholMatrixType rawMatrix() const { eigen_assert(Base::m_factorizationIsOk && "Simplicial Cholesky not factorized"); return Base::m_matrix; } /** Computes the sparse Cholesky decomposition of \a matrix */ SimplicialCholesky& compute(const MatrixType& matrix) { if(m_LDLT) Base::template compute(matrix); else Base::template compute(matrix); return *this; } /** Performs a symbolic decomposition on the sparcity of \a matrix. * * This function is particularly useful when solving for several problems having the same structure. * * \sa factorize() */ void analyzePattern(const MatrixType& a) { Base::analyzePattern(a, m_LDLT); } /** Performs a numeric decomposition of \a matrix * * The given matrix must has the same sparcity than the matrix on which the symbolic decomposition has been performed. 
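 *
 * Which factorization factorize() performs is controlled by setMode(); a sketch (the matrix \c A is assumed):
 * \code
 * Eigen::SimplicialCholesky<Eigen::SparseMatrix<double> > chol;
 * chol.setMode(Eigen::SimplicialCholeskyLLT);   // the default mode is SimplicialCholeskyLDLT
 * chol.compute(A);
 * \endcode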
* * \sa analyzePattern() */ void factorize(const MatrixType& a) { if(m_LDLT) Base::template factorize(a); else Base::template factorize(a); } /** \internal */ template void _solve(const MatrixBase &b, MatrixBase &dest) const { eigen_assert(Base::m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()"); eigen_assert(Base::m_matrix.rows()==b.rows()); if(Base::m_info!=Success) return; if(Base::m_P.size()>0) dest = Base::m_P * b; else dest = b; if(Base::m_matrix.nonZeros()>0) // otherwise L==I { if(m_LDLT) LDLTTraits::getL(Base::m_matrix).solveInPlace(dest); else LLTTraits::getL(Base::m_matrix).solveInPlace(dest); } if(Base::m_diag.size()>0) dest = Base::m_diag.asDiagonal().inverse() * dest; if (Base::m_matrix.nonZeros()>0) // otherwise I==I { if(m_LDLT) LDLTTraits::getU(Base::m_matrix).solveInPlace(dest); else LLTTraits::getU(Base::m_matrix).solveInPlace(dest); } if(Base::m_P.size()>0) dest = Base::m_Pinv * dest; } Scalar determinant() const { if(m_LDLT) { return Base::m_diag.prod(); } else { Scalar detL = Diagonal(Base::m_matrix).prod(); return numext::abs2(detL); } } protected: bool m_LDLT; }; template void SimplicialCholeskyBase::ordering(const MatrixType& a, CholMatrixType& ap) { eigen_assert(a.rows()==a.cols()); const Index size = a.rows(); // Note that amd compute the inverse permutation { CholMatrixType C; C = a.template selfadjointView(); OrderingType ordering; ordering(C,m_Pinv); } if(m_Pinv.size()>0) m_P = m_Pinv.inverse(); else m_P.resize(0); ap.resize(size,size); ap.template selfadjointView() = a.template selfadjointView().twistedBy(m_P); } namespace internal { template struct solve_retval, Rhs> : solve_retval_base, Rhs> { typedef SimplicialCholeskyBase Dec; EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) template void evalTo(Dest& dst) const { dec().derived()._solve(rhs(),dst); } }; template struct sparse_solve_retval, Rhs> : sparse_solve_retval_base, Rhs> { typedef SimplicialCholeskyBase Dec; EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs) template void evalTo(Dest& dst) const { this->defaultEvalTo(dst); } }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_SIMPLICIAL_CHOLESKY_H RcppEigen/inst/include/Eigen/src/Geometry/0000755000176200001440000000000012647570720020151 5ustar liggesusersRcppEigen/inst/include/Eigen/src/Geometry/EulerAngles.h0000644000176200001440000000665112305236565022535 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_EULERANGLES_H #define EIGEN_EULERANGLES_H namespace Eigen { /** \geometry_module \ingroup Geometry_Module * * * \returns the Euler-angles of the rotation matrix \c *this using the convention defined by the triplet (\a a0,\a a1,\a a2) * * Each of the three parameters \a a0,\a a1,\a a2 represents the respective rotation axis as an integer in {0,1,2}. * For instance, in: * \code Vector3f ea = mat.eulerAngles(2, 0, 2); \endcode * "2" represents the z axis and "0" the x axis, etc. 
The returned angles are such that * we have the following equality: * \code * mat == AngleAxisf(ea[0], Vector3f::UnitZ()) * * AngleAxisf(ea[1], Vector3f::UnitX()) * * AngleAxisf(ea[2], Vector3f::UnitZ()); \endcode * This corresponds to the right-multiply conventions (with right hand side frames). * * The returned angles are in the ranges [0:pi]x[-pi:pi]x[-pi:pi]. * * \sa class AngleAxis */ template inline Matrix::Scalar,3,1> MatrixBase::eulerAngles(Index a0, Index a1, Index a2) const { using std::atan2; using std::sin; using std::cos; /* Implemented from Graphics Gems IV */ EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Derived,3,3) Matrix res; typedef Matrix Vector2; const Index odd = ((a0+1)%3 == a1) ? 0 : 1; const Index i = a0; const Index j = (a0 + 1 + odd)%3; const Index k = (a0 + 2 - odd)%3; if (a0==a2) { res[0] = atan2(coeff(j,i), coeff(k,i)); if((odd && res[0]Scalar(0))) { res[0] = (res[0] > Scalar(0)) ? res[0] - Scalar(M_PI) : res[0] + Scalar(M_PI); Scalar s2 = Vector2(coeff(j,i), coeff(k,i)).norm(); res[1] = -atan2(s2, coeff(i,i)); } else { Scalar s2 = Vector2(coeff(j,i), coeff(k,i)).norm(); res[1] = atan2(s2, coeff(i,i)); } // With a=(0,1,0), we have i=0; j=1; k=2, and after computing the first two angles, // we can compute their respective rotation, and apply its inverse to M. Since the result must // be a rotation around x, we have: // // c2 s1.s2 c1.s2 1 0 0 // 0 c1 -s1 * M = 0 c3 s3 // -s2 s1.c2 c1.c2 0 -s3 c3 // // Thus: m11.c1 - m21.s1 = c3 & m12.c1 - m22.s1 = s3 Scalar s1 = sin(res[0]); Scalar c1 = cos(res[0]); res[2] = atan2(c1*coeff(j,k)-s1*coeff(k,k), c1*coeff(j,j) - s1 * coeff(k,j)); } else { res[0] = atan2(coeff(j,k), coeff(k,k)); Scalar c2 = Vector2(coeff(i,i), coeff(i,j)).norm(); if((odd && res[0]Scalar(0))) { res[0] = (res[0] > Scalar(0)) ? res[0] - Scalar(M_PI) : res[0] + Scalar(M_PI); res[1] = atan2(-coeff(i,k), -c2); } else res[1] = atan2(-coeff(i,k), c2); Scalar s1 = sin(res[0]); Scalar c1 = cos(res[0]); res[2] = atan2(s1*coeff(k,i)-c1*coeff(j,i), c1*coeff(j,j) - s1 * coeff(k,j)); } if (!odd) res = -res; return res; } } // end namespace Eigen #endif // EIGEN_EULERANGLES_H RcppEigen/inst/include/Eigen/src/Geometry/Rotation2D.h0000644000176200001440000001216112472774202022305 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ROTATION2D_H #define EIGEN_ROTATION2D_H namespace Eigen { /** \geometry_module \ingroup Geometry_Module * * \class Rotation2D * * \brief Represents a rotation/orientation in a 2 dimensional space. * * \param _Scalar the scalar type, i.e., the type of the coefficients * * This class is equivalent to a single scalar representing a counter clock wise rotation * as a single angle in radian. It provides some additional features such as the automatic * conversion from/to a 2x2 rotation matrix. Moreover this class aims to provide a similar * interface to Quaternion in order to facilitate the writing of generic algorithms * dealing with rotations. 
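 *
 * A minimal illustrative sketch:
 * \code
 * Eigen::Rotation2Df rot(0.5f);                          // 0.5 radian counter clock wise rotation
 * Eigen::Vector2f v = rot * Eigen::Vector2f::UnitX();    // rotate the x axis
 * Eigen::Matrix2f R = rot.toRotationMatrix();            // equivalent 2x2 rotation matrix
 * \endcode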
* * \sa class Quaternion, class Transform */ namespace internal { template struct traits > { typedef _Scalar Scalar; }; } // end namespace internal template class Rotation2D : public RotationBase,2> { typedef RotationBase,2> Base; public: using Base::operator*; enum { Dim = 2 }; /** the scalar type of the coefficients */ typedef _Scalar Scalar; typedef Matrix Vector2; typedef Matrix Matrix2; protected: Scalar m_angle; public: /** Construct a 2D counter clock wise rotation from the angle \a a in radian. */ inline Rotation2D(const Scalar& a) : m_angle(a) {} /** Default constructor wihtout initialization. The represented rotation is undefined. */ Rotation2D() {} /** \returns the rotation angle */ inline Scalar angle() const { return m_angle; } /** \returns a read-write reference to the rotation angle */ inline Scalar& angle() { return m_angle; } /** \returns the inverse rotation */ inline Rotation2D inverse() const { return -m_angle; } /** Concatenates two rotations */ inline Rotation2D operator*(const Rotation2D& other) const { return m_angle + other.m_angle; } /** Concatenates two rotations */ inline Rotation2D& operator*=(const Rotation2D& other) { m_angle += other.m_angle; return *this; } /** Applies the rotation to a 2D vector */ Vector2 operator* (const Vector2& vec) const { return toRotationMatrix() * vec; } template Rotation2D& fromRotationMatrix(const MatrixBase& m); Matrix2 toRotationMatrix() const; /** \returns the spherical interpolation between \c *this and \a other using * parameter \a t. It is in fact equivalent to a linear interpolation. */ inline Rotation2D slerp(const Scalar& t, const Rotation2D& other) const { return m_angle * (1-t) + other.angle() * t; } /** \returns \c *this with scalar type casted to \a NewScalarType * * Note that if \a NewScalarType is equal to the current scalar type of \c *this * then this function smartly returns a const reference to \c *this. */ template inline typename internal::cast_return_type >::type cast() const { return typename internal::cast_return_type >::type(*this); } /** Copy constructor with scalar type conversion */ template inline explicit Rotation2D(const Rotation2D& other) { m_angle = Scalar(other.angle()); } static inline Rotation2D Identity() { return Rotation2D(0); } /** \returns \c true if \c *this is approximately equal to \a other, within the precision * determined by \a prec. * * \sa MatrixBase::isApprox() */ bool isApprox(const Rotation2D& other, const typename NumTraits::Real& prec = NumTraits::dummy_precision()) const { return internal::isApprox(m_angle,other.m_angle, prec); } }; /** \ingroup Geometry_Module * single precision 2D rotation type */ typedef Rotation2D Rotation2Df; /** \ingroup Geometry_Module * double precision 2D rotation type */ typedef Rotation2D Rotation2Dd; /** Set \c *this from a 2x2 rotation matrix \a mat. * In other words, this function extract the rotation angle * from the rotation matrix. */ template template Rotation2D& Rotation2D::fromRotationMatrix(const MatrixBase& mat) { using std::atan2; EIGEN_STATIC_ASSERT(Derived::RowsAtCompileTime==2 && Derived::ColsAtCompileTime==2,YOU_MADE_A_PROGRAMMING_MISTAKE) m_angle = atan2(mat.coeff(1,0), mat.coeff(0,0)); return *this; } /** Constructs and \returns an equivalent 2x2 rotation matrix. 
*/ template typename Rotation2D::Matrix2 Rotation2D::toRotationMatrix(void) const { using std::sin; using std::cos; Scalar sinA = sin(m_angle); Scalar cosA = cos(m_angle); return (Matrix2() << cosA, -sinA, sinA, cosA).finished(); } } // end namespace Eigen #endif // EIGEN_ROTATION2D_H RcppEigen/inst/include/Eigen/src/Geometry/Hyperplane.h0000644000176200001440000002623512446006401022424 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // Copyright (C) 2008 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_HYPERPLANE_H #define EIGEN_HYPERPLANE_H namespace Eigen { /** \geometry_module \ingroup Geometry_Module * * \class Hyperplane * * \brief A hyperplane * * A hyperplane is an affine subspace of dimension n-1 in a space of dimension n. * For example, a hyperplane in a plane is a line; a hyperplane in 3-space is a plane. * * \param _Scalar the scalar type, i.e., the type of the coefficients * \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic. * Notice that the dimension of the hyperplane is _AmbientDim-1. * * This class represents an hyperplane as the zero set of the implicit equation * \f$ n \cdot x + d = 0 \f$ where \f$ n \f$ is a unit normal vector of the plane (linear part) * and \f$ d \f$ is the distance (offset) to the origin. */ template class Hyperplane { public: EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==Dynamic ? Dynamic : _AmbientDim+1) enum { AmbientDimAtCompileTime = _AmbientDim, Options = _Options }; typedef _Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef DenseIndex Index; typedef Matrix VectorType; typedef Matrix Coefficients; typedef Block NormalReturnType; typedef const Block ConstNormalReturnType; /** Default constructor without initialization */ inline Hyperplane() {} template Hyperplane(const Hyperplane& other) : m_coeffs(other.coeffs()) {} /** Constructs a dynamic-size hyperplane with \a _dim the dimension * of the ambient space */ inline explicit Hyperplane(Index _dim) : m_coeffs(_dim+1) {} /** Construct a plane from its normal \a n and a point \a e onto the plane. * \warning the vector normal is assumed to be normalized. */ inline Hyperplane(const VectorType& n, const VectorType& e) : m_coeffs(n.size()+1) { normal() = n; offset() = -n.dot(e); } /** Constructs a plane from its normal \a n and distance to the origin \a d * such that the algebraic equation of the plane is \f$ n \cdot x + d = 0 \f$. * \warning the vector normal is assumed to be normalized. */ inline Hyperplane(const VectorType& n, const Scalar& d) : m_coeffs(n.size()+1) { normal() = n; offset() = d; } /** Constructs a hyperplane passing through the two points. If the dimension of the ambient space * is greater than 2, then there isn't uniqueness, so an arbitrary choice is made. */ static inline Hyperplane Through(const VectorType& p0, const VectorType& p1) { Hyperplane result(p0.size()); result.normal() = (p1 - p0).unitOrthogonal(); result.offset() = -p0.dot(result.normal()); return result; } /** Constructs a hyperplane passing through the three points. The dimension of the ambient space * is required to be exactly 3. 
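 *
 * An illustrative sketch (the three points are arbitrary example values):
 * \code
 * Eigen::Vector3f p0(0,0,0), p1(1,0,0), p2(0,1,0);
 * Eigen::Hyperplane<float,3> plane = Eigen::Hyperplane<float,3>::Through(p0, p1, p2);
 * // plane.normal() is (0,0,1) up to sign and plane.offset() is 0 for these points
 * \endcode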
*/ static inline Hyperplane Through(const VectorType& p0, const VectorType& p1, const VectorType& p2) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 3) Hyperplane result(p0.size()); VectorType v0(p2 - p0), v1(p1 - p0); result.normal() = v0.cross(v1); RealScalar norm = result.normal().norm(); if(norm <= v0.norm() * v1.norm() * NumTraits::epsilon()) { Matrix m; m << v0.transpose(), v1.transpose(); JacobiSVD > svd(m, ComputeFullV); result.normal() = svd.matrixV().col(2); } else result.normal() /= norm; result.offset() = -p0.dot(result.normal()); return result; } /** Constructs a hyperplane passing through the parametrized line \a parametrized. * If the dimension of the ambient space is greater than 2, then there isn't uniqueness, * so an arbitrary choice is made. */ // FIXME to be consitent with the rest this could be implemented as a static Through function ?? explicit Hyperplane(const ParametrizedLine& parametrized) { normal() = parametrized.direction().unitOrthogonal(); offset() = -parametrized.origin().dot(normal()); } ~Hyperplane() {} /** \returns the dimension in which the plane holds */ inline Index dim() const { return AmbientDimAtCompileTime==Dynamic ? m_coeffs.size()-1 : Index(AmbientDimAtCompileTime); } /** normalizes \c *this */ void normalize(void) { m_coeffs /= normal().norm(); } /** \returns the signed distance between the plane \c *this and a point \a p. * \sa absDistance() */ inline Scalar signedDistance(const VectorType& p) const { return normal().dot(p) + offset(); } /** \returns the absolute distance between the plane \c *this and a point \a p. * \sa signedDistance() */ inline Scalar absDistance(const VectorType& p) const { using std::abs; return abs(signedDistance(p)); } /** \returns the projection of a point \a p onto the plane \c *this. */ inline VectorType projection(const VectorType& p) const { return p - signedDistance(p) * normal(); } /** \returns a constant reference to the unit normal vector of the plane, which corresponds * to the linear part of the implicit equation. */ inline ConstNormalReturnType normal() const { return ConstNormalReturnType(m_coeffs,0,0,dim(),1); } /** \returns a non-constant reference to the unit normal vector of the plane, which corresponds * to the linear part of the implicit equation. */ inline NormalReturnType normal() { return NormalReturnType(m_coeffs,0,0,dim(),1); } /** \returns the distance to the origin, which is also the "constant term" of the implicit equation * \warning the vector normal is assumed to be normalized. */ inline const Scalar& offset() const { return m_coeffs.coeff(dim()); } /** \returns a non-constant reference to the distance to the origin, which is also the constant part * of the implicit equation */ inline Scalar& offset() { return m_coeffs(dim()); } /** \returns a constant reference to the coefficients c_i of the plane equation: * \f$ c_0*x_0 + ... + c_{d-1}*x_{d-1} + c_d = 0 \f$ */ inline const Coefficients& coeffs() const { return m_coeffs; } /** \returns a non-constant reference to the coefficients c_i of the plane equation: * \f$ c_0*x_0 + ... + c_{d-1}*x_{d-1} + c_d = 0 \f$ */ inline Coefficients& coeffs() { return m_coeffs; } /** \returns the intersection of *this with \a other. * * \warning The ambient space must be a plane, i.e. have dimension 2, so that \c *this and \a other are lines. * * \note If \a other is approximately parallel to *this, this method will return any point on *this. 
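 *
 * Sketch with two example lines in the plane:
 * \code
 * typedef Eigen::Hyperplane<float,2> Line2;
 * Line2 l1 = Line2::Through(Eigen::Vector2f(0,0), Eigen::Vector2f(1,1));  // the line y = x
 * Line2 l2 = Line2::Through(Eigen::Vector2f(0,1), Eigen::Vector2f(1,0));  // the line y = 1 - x
 * Eigen::Vector2f p = l1.intersection(l2);                                // approximately (0.5, 0.5)
 * \endcode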
*/ VectorType intersection(const Hyperplane& other) const { using std::abs; EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 2) Scalar det = coeffs().coeff(0) * other.coeffs().coeff(1) - coeffs().coeff(1) * other.coeffs().coeff(0); // since the line equations ax+by=c are normalized with a^2+b^2=1, the following tests // whether the two lines are approximately parallel. if(internal::isMuchSmallerThan(det, Scalar(1))) { // special case where the two lines are approximately parallel. Pick any point on the first line. if(abs(coeffs().coeff(1))>abs(coeffs().coeff(0))) return VectorType(coeffs().coeff(1), -coeffs().coeff(2)/coeffs().coeff(1)-coeffs().coeff(0)); else return VectorType(-coeffs().coeff(2)/coeffs().coeff(0)-coeffs().coeff(1), coeffs().coeff(0)); } else { // general case Scalar invdet = Scalar(1) / det; return VectorType(invdet*(coeffs().coeff(1)*other.coeffs().coeff(2)-other.coeffs().coeff(1)*coeffs().coeff(2)), invdet*(other.coeffs().coeff(0)*coeffs().coeff(2)-coeffs().coeff(0)*other.coeffs().coeff(2))); } } /** Applies the transformation matrix \a mat to \c *this and returns a reference to \c *this. * * \param mat the Dim x Dim transformation matrix * \param traits specifies whether the matrix \a mat represents an #Isometry * or a more generic #Affine transformation. The default is #Affine. */ template inline Hyperplane& transform(const MatrixBase& mat, TransformTraits traits = Affine) { if (traits==Affine) normal() = mat.inverse().transpose() * normal(); else if (traits==Isometry) normal() = mat * normal(); else { eigen_assert(0 && "invalid traits value in Hyperplane::transform()"); } return *this; } /** Applies the transformation \a t to \c *this and returns a reference to \c *this. * * \param t the transformation of dimension Dim * \param traits specifies whether the transformation \a t represents an #Isometry * or a more generic #Affine transformation. The default is #Affine. * Other kind of transformations are not supported. */ template inline Hyperplane& transform(const Transform& t, TransformTraits traits = Affine) { transform(t.linear(), traits); offset() -= normal().dot(t.translation()); return *this; } /** \returns \c *this with scalar type casted to \a NewScalarType * * Note that if \a NewScalarType is equal to the current scalar type of \c *this * then this function smartly returns a const reference to \c *this. */ template inline typename internal::cast_return_type >::type cast() const { return typename internal::cast_return_type >::type(*this); } /** Copy constructor with scalar type conversion */ template inline explicit Hyperplane(const Hyperplane& other) { m_coeffs = other.coeffs().template cast(); } /** \returns \c true if \c *this is approximately equal to \a other, within the precision * determined by \a prec. * * \sa MatrixBase::isApprox() */ template bool isApprox(const Hyperplane& other, const typename NumTraits::Real& prec = NumTraits::dummy_precision()) const { return m_coeffs.isApprox(other.m_coeffs, prec); } protected: Coefficients m_coeffs; }; } // end namespace Eigen #endif // EIGEN_HYPERPLANE_H RcppEigen/inst/include/Eigen/src/Geometry/Quaternion.h0000644000176200001440000007120112647570720022450 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2010 Gael Guennebaud // Copyright (C) 2009 Mathieu Gautier // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. 
If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_QUATERNION_H #define EIGEN_QUATERNION_H namespace Eigen { /*************************************************************************** * Definition of QuaternionBase * The implementation is at the end of the file ***************************************************************************/ namespace internal { template struct quaternionbase_assign_impl; } /** \geometry_module \ingroup Geometry_Module * \class QuaternionBase * \brief Base class for quaternion expressions * \tparam Derived derived type (CRTP) * \sa class Quaternion */ template class QuaternionBase : public RotationBase { typedef RotationBase Base; public: using Base::operator*; using Base::derived; typedef typename internal::traits::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef typename internal::traits::Coefficients Coefficients; enum { Flags = Eigen::internal::traits::Flags }; // typedef typename Matrix Coefficients; /** the type of a 3D vector */ typedef Matrix Vector3; /** the equivalent rotation matrix type */ typedef Matrix Matrix3; /** the equivalent angle-axis type */ typedef AngleAxis AngleAxisType; /** \returns the \c x coefficient */ inline Scalar x() const { return this->derived().coeffs().coeff(0); } /** \returns the \c y coefficient */ inline Scalar y() const { return this->derived().coeffs().coeff(1); } /** \returns the \c z coefficient */ inline Scalar z() const { return this->derived().coeffs().coeff(2); } /** \returns the \c w coefficient */ inline Scalar w() const { return this->derived().coeffs().coeff(3); } /** \returns a reference to the \c x coefficient */ inline Scalar& x() { return this->derived().coeffs().coeffRef(0); } /** \returns a reference to the \c y coefficient */ inline Scalar& y() { return this->derived().coeffs().coeffRef(1); } /** \returns a reference to the \c z coefficient */ inline Scalar& z() { return this->derived().coeffs().coeffRef(2); } /** \returns a reference to the \c w coefficient */ inline Scalar& w() { return this->derived().coeffs().coeffRef(3); } /** \returns a read-only vector expression of the imaginary part (x,y,z) */ inline const VectorBlock vec() const { return coeffs().template head<3>(); } /** \returns a vector expression of the imaginary part (x,y,z) */ inline VectorBlock vec() { return coeffs().template head<3>(); } /** \returns a read-only vector expression of the coefficients (x,y,z,w) */ inline const typename internal::traits::Coefficients& coeffs() const { return derived().coeffs(); } /** \returns a vector expression of the coefficients (x,y,z,w) */ inline typename internal::traits::Coefficients& coeffs() { return derived().coeffs(); } EIGEN_STRONG_INLINE QuaternionBase& operator=(const QuaternionBase& other); template EIGEN_STRONG_INLINE Derived& operator=(const QuaternionBase& other); // disabled this copy operator as it is giving very strange compilation errors when compiling // test_stdvector with GCC 4.4.2. This looks like a GCC bug though, so feel free to re-enable it if it's // useful; however notice that we already have the templated operator= above and e.g. in MatrixBase // we didn't have to add, in addition to templated operator=, such a non-templated copy operator. 
// Derived& operator=(const QuaternionBase& other) // { return operator=(other); } Derived& operator=(const AngleAxisType& aa); template Derived& operator=(const MatrixBase& m); /** \returns a quaternion representing an identity rotation * \sa MatrixBase::Identity() */ static inline Quaternion Identity() { return Quaternion(Scalar(1), Scalar(0), Scalar(0), Scalar(0)); } /** \sa QuaternionBase::Identity(), MatrixBase::setIdentity() */ inline QuaternionBase& setIdentity() { coeffs() << Scalar(0), Scalar(0), Scalar(0), Scalar(1); return *this; } /** \returns the squared norm of the quaternion's coefficients * \sa QuaternionBase::norm(), MatrixBase::squaredNorm() */ inline Scalar squaredNorm() const { return coeffs().squaredNorm(); } /** \returns the norm of the quaternion's coefficients * \sa QuaternionBase::squaredNorm(), MatrixBase::norm() */ inline Scalar norm() const { return coeffs().norm(); } /** Normalizes the quaternion \c *this * \sa normalized(), MatrixBase::normalize() */ inline void normalize() { coeffs().normalize(); } /** \returns a normalized copy of \c *this * \sa normalize(), MatrixBase::normalized() */ inline Quaternion normalized() const { return Quaternion(coeffs().normalized()); } /** \returns the dot product of \c *this and \a other * Geometrically speaking, the dot product of two unit quaternions * corresponds to the cosine of half the angle between the two rotations. * \sa angularDistance() */ template inline Scalar dot(const QuaternionBase& other) const { return coeffs().dot(other.coeffs()); } template Scalar angularDistance(const QuaternionBase& other) const; /** \returns an equivalent 3x3 rotation matrix */ Matrix3 toRotationMatrix() const; /** \returns the quaternion which transform \a a into \a b through a rotation */ template Derived& setFromTwoVectors(const MatrixBase& a, const MatrixBase& b); template EIGEN_STRONG_INLINE Quaternion operator* (const QuaternionBase& q) const; template EIGEN_STRONG_INLINE Derived& operator*= (const QuaternionBase& q); /** \returns the quaternion describing the inverse rotation */ Quaternion inverse() const; /** \returns the conjugated quaternion */ Quaternion conjugate() const; template Quaternion slerp(const Scalar& t, const QuaternionBase& other) const; /** \returns \c true if \c *this is approximately equal to \a other, within the precision * determined by \a prec. * * \sa MatrixBase::isApprox() */ template bool isApprox(const QuaternionBase& other, const RealScalar& prec = NumTraits::dummy_precision()) const { return coeffs().isApprox(other.coeffs(), prec); } /** return the result vector of \a v through the rotation*/ EIGEN_STRONG_INLINE Vector3 _transformVector(const Vector3& v) const; /** \returns \c *this with scalar type casted to \a NewScalarType * * Note that if \a NewScalarType is equal to the current scalar type of \c *this * then this function smartly returns a const reference to \c *this. 
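 *
 * Sketch of a scalar type conversion:
 * \code
 * Eigen::Quaterniond qd = Eigen::Quaterniond::Identity();
 * Eigen::Quaternionf qf = qd.cast<float>();
 * \endcode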
*/ template inline typename internal::cast_return_type >::type cast() const { return typename internal::cast_return_type >::type(derived()); } #ifdef EIGEN_QUATERNIONBASE_PLUGIN # include EIGEN_QUATERNIONBASE_PLUGIN #endif }; /*************************************************************************** * Definition/implementation of Quaternion ***************************************************************************/ /** \geometry_module \ingroup Geometry_Module * * \class Quaternion * * \brief The quaternion class used to represent 3D orientations and rotations * * \tparam _Scalar the scalar type, i.e., the type of the coefficients * \tparam _Options controls the memory alignment of the coefficients. Can be \# AutoAlign or \# DontAlign. Default is AutoAlign. * * This class represents a quaternion \f$ w+xi+yj+zk \f$ that is a convenient representation of * orientations and rotations of objects in three dimensions. Compared to other representations * like Euler angles or 3x3 matrices, quaternions offer the following advantages: * \li \b compact storage (4 scalars) * \li \b efficient to compose (28 flops), * \li \b stable spherical interpolation * * The following two typedefs are provided for convenience: * \li \c Quaternionf for \c float * \li \c Quaterniond for \c double * * \warning Operations interpreting the quaternion as rotation have undefined behavior if the quaternion is not normalized. * * \sa class AngleAxis, class Transform */ namespace internal { template struct traits > { typedef Quaternion<_Scalar,_Options> PlainObject; typedef _Scalar Scalar; typedef Matrix<_Scalar,4,1,_Options> Coefficients; enum{ IsAligned = internal::traits::Flags & AlignedBit, Flags = IsAligned ? (AlignedBit | LvalueBit) : LvalueBit }; }; } template class Quaternion : public QuaternionBase > { typedef QuaternionBase > Base; enum { IsAligned = internal::traits::IsAligned }; public: typedef _Scalar Scalar; EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Quaternion) using Base::operator*=; typedef typename internal::traits::Coefficients Coefficients; typedef typename Base::AngleAxisType AngleAxisType; /** Default constructor leaving the quaternion uninitialized. */ inline Quaternion() {} /** Constructs and initializes the quaternion \f$ w+xi+yj+zk \f$ from * its four coefficients \a w, \a x, \a y and \a z. * * \warning Note the order of the arguments: the real \a w coefficient first, * while internally the coefficients are stored in the following order: * [\c x, \c y, \c z, \c w] */ inline Quaternion(const Scalar& w, const Scalar& x, const Scalar& y, const Scalar& z) : m_coeffs(x, y, z, w){} /** Constructs and initialize a quaternion from the array data */ inline Quaternion(const Scalar* data) : m_coeffs(data) {} /** Copy constructor */ template EIGEN_STRONG_INLINE Quaternion(const QuaternionBase& other) { this->Base::operator=(other); } /** Constructs and initializes a quaternion from the angle-axis \a aa */ explicit inline Quaternion(const AngleAxisType& aa) { *this = aa; } /** Constructs and initializes a quaternion from either: * - a rotation matrix expression, * - a 4D vector expression representing quaternion coefficients. 
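 *
 * For example (a sketch; the rotation matrix used here is just the identity):
 * \code
 * Eigen::Matrix3d R = Eigen::Matrix3d::Identity();
 * Eigen::Quaterniond q1(R);                          // from a 3x3 rotation matrix
 * Eigen::Quaterniond q2(Eigen::Vector4d(0,0,0,1));   // from coefficients in (x,y,z,w) order
 * \endcode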
*/ template explicit inline Quaternion(const MatrixBase& other) { *this = other; } /** Explicit copy constructor with scalar conversion */ template explicit inline Quaternion(const Quaternion& other) { m_coeffs = other.coeffs().template cast(); } template static Quaternion FromTwoVectors(const MatrixBase& a, const MatrixBase& b); inline Coefficients& coeffs() { return m_coeffs;} inline const Coefficients& coeffs() const { return m_coeffs;} EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(IsAligned) protected: Coefficients m_coeffs; #ifndef EIGEN_PARSED_BY_DOXYGEN static EIGEN_STRONG_INLINE void _check_template_params() { EIGEN_STATIC_ASSERT( (_Options & DontAlign) == _Options, INVALID_MATRIX_TEMPLATE_PARAMETERS) } #endif }; /** \ingroup Geometry_Module * single precision quaternion type */ typedef Quaternion Quaternionf; /** \ingroup Geometry_Module * double precision quaternion type */ typedef Quaternion Quaterniond; /*************************************************************************** * Specialization of Map> ***************************************************************************/ namespace internal { template struct traits, _Options> > : traits > { typedef Map, _Options> Coefficients; }; } namespace internal { template struct traits, _Options> > : traits > { typedef Map, _Options> Coefficients; typedef traits > TraitsBase; enum { Flags = TraitsBase::Flags & ~LvalueBit }; }; } /** \ingroup Geometry_Module * \brief Quaternion expression mapping a constant memory buffer * * \tparam _Scalar the type of the Quaternion coefficients * \tparam _Options see class Map * * This is a specialization of class Map for Quaternion. This class allows to view * a 4 scalar memory buffer as an Eigen's Quaternion object. * * \sa class Map, class Quaternion, class QuaternionBase */ template class Map, _Options > : public QuaternionBase, _Options> > { typedef QuaternionBase, _Options> > Base; public: typedef _Scalar Scalar; typedef typename internal::traits::Coefficients Coefficients; EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map) using Base::operator*=; /** Constructs a Mapped Quaternion object from the pointer \a coeffs * * The pointer \a coeffs must reference the four coefficients of Quaternion in the following order: * \code *coeffs == {x, y, z, w} \endcode * * If the template parameter _Options is set to #Aligned, then the pointer coeffs must be aligned. */ EIGEN_STRONG_INLINE Map(const Scalar* coeffs) : m_coeffs(coeffs) {} inline const Coefficients& coeffs() const { return m_coeffs;} protected: const Coefficients m_coeffs; }; /** \ingroup Geometry_Module * \brief Expression of a quaternion from a memory buffer * * \tparam _Scalar the type of the Quaternion coefficients * \tparam _Options see class Map * * This is a specialization of class Map for Quaternion. This class allows to view * a 4 scalar memory buffer as an Eigen's Quaternion object. * * \sa class Map, class Quaternion, class QuaternionBase */ template class Map, _Options > : public QuaternionBase, _Options> > { typedef QuaternionBase, _Options> > Base; public: typedef _Scalar Scalar; typedef typename internal::traits::Coefficients Coefficients; EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map) using Base::operator*=; /** Constructs a Mapped Quaternion object from the pointer \a coeffs * * The pointer \a coeffs must reference the four coefficients of Quaternion in the following order: * \code *coeffs == {x, y, z, w} \endcode * * If the template parameter _Options is set to #Aligned, then the pointer coeffs must be aligned. 
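 *
 * Sketch of mapping an existing buffer (the array \c buf is illustrative):
 * \code
 * double buf[4] = {0., 0., 0., 1.};                  // stored as x, y, z, w: the identity rotation
 * Eigen::QuaternionMapd q(buf);                      // no copy, q reads and writes buf directly
 * Eigen::Vector3d v = q * Eigen::Vector3d::UnitX();  // apply the rotation
 * \endcode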
*/ EIGEN_STRONG_INLINE Map(Scalar* coeffs) : m_coeffs(coeffs) {} inline Coefficients& coeffs() { return m_coeffs; } inline const Coefficients& coeffs() const { return m_coeffs; } protected: Coefficients m_coeffs; }; /** \ingroup Geometry_Module * Map an unaligned array of single precision scalars as a quaternion */ typedef Map, 0> QuaternionMapf; /** \ingroup Geometry_Module * Map an unaligned array of double precision scalars as a quaternion */ typedef Map, 0> QuaternionMapd; /** \ingroup Geometry_Module * Map a 16-byte aligned array of single precision scalars as a quaternion */ typedef Map, Aligned> QuaternionMapAlignedf; /** \ingroup Geometry_Module * Map a 16-byte aligned array of double precision scalars as a quaternion */ typedef Map, Aligned> QuaternionMapAlignedd; /*************************************************************************** * Implementation of QuaternionBase methods ***************************************************************************/ // Generic Quaternion * Quaternion product // This product can be specialized for a given architecture via the Arch template argument. namespace internal { template struct quat_product { static EIGEN_STRONG_INLINE Quaternion run(const QuaternionBase& a, const QuaternionBase& b){ return Quaternion ( a.w() * b.w() - a.x() * b.x() - a.y() * b.y() - a.z() * b.z(), a.w() * b.x() + a.x() * b.w() + a.y() * b.z() - a.z() * b.y(), a.w() * b.y() + a.y() * b.w() + a.z() * b.x() - a.x() * b.z(), a.w() * b.z() + a.z() * b.w() + a.x() * b.y() - a.y() * b.x() ); } }; } /** \returns the concatenation of two rotations as a quaternion-quaternion product */ template template EIGEN_STRONG_INLINE Quaternion::Scalar> QuaternionBase::operator* (const QuaternionBase& other) const { EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) return internal::quat_product::Scalar, internal::traits::IsAligned && internal::traits::IsAligned>::run(*this, other); } /** \sa operator*(Quaternion) */ template template EIGEN_STRONG_INLINE Derived& QuaternionBase::operator*= (const QuaternionBase& other) { derived() = derived() * other.derived(); return derived(); } /** Rotation of a vector by a quaternion. * \remarks If the quaternion is used to rotate several points (>1) * then it is much more efficient to first convert it to a 3x3 Matrix. * Comparison of the operation cost for n transformations: * - Quaternion2: 30n * - Via a Matrix3: 24 + 15n */ template EIGEN_STRONG_INLINE typename QuaternionBase::Vector3 QuaternionBase::_transformVector(const Vector3& v) const { // Note that this algorithm comes from the optimization by hand // of the conversion to a Matrix followed by a Matrix/Vector product. // It appears to be much faster than the common algorithm found // in the literature (30 versus 39 flops). It also requires two // Vector3 as temporaries. 
Vector3 uv = this->vec().cross(v); uv += uv; return v + this->w() * uv + this->vec().cross(uv); } template EIGEN_STRONG_INLINE QuaternionBase& QuaternionBase::operator=(const QuaternionBase& other) { coeffs() = other.coeffs(); return derived(); } template template EIGEN_STRONG_INLINE Derived& QuaternionBase::operator=(const QuaternionBase& other) { coeffs() = other.coeffs(); return derived(); } /** Set \c *this from an angle-axis \a aa and returns a reference to \c *this */ template EIGEN_STRONG_INLINE Derived& QuaternionBase::operator=(const AngleAxisType& aa) { using std::cos; using std::sin; Scalar ha = Scalar(0.5)*aa.angle(); // Scalar(0.5) to suppress precision loss warnings this->w() = cos(ha); this->vec() = sin(ha) * aa.axis(); return derived(); } /** Set \c *this from the expression \a xpr: * - if \a xpr is a 4x1 vector, then \a xpr is assumed to be a quaternion * - if \a xpr is a 3x3 matrix, then \a xpr is assumed to be rotation matrix * and \a xpr is converted to a quaternion */ template template inline Derived& QuaternionBase::operator=(const MatrixBase& xpr) { EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) internal::quaternionbase_assign_impl::run(*this, xpr.derived()); return derived(); } /** Convert the quaternion to a 3x3 rotation matrix. The quaternion is required to * be normalized, otherwise the result is undefined. */ template inline typename QuaternionBase::Matrix3 QuaternionBase::toRotationMatrix(void) const { // NOTE if inlined, then gcc 4.2 and 4.4 get rid of the temporary (not gcc 4.3 !!) // if not inlined then the cost of the return by value is huge ~ +35%, // however, not inlining this function is an order of magnitude slower, so // it has to be inlined, and so the return by value is not an issue Matrix3 res; const Scalar tx = Scalar(2)*this->x(); const Scalar ty = Scalar(2)*this->y(); const Scalar tz = Scalar(2)*this->z(); const Scalar twx = tx*this->w(); const Scalar twy = ty*this->w(); const Scalar twz = tz*this->w(); const Scalar txx = tx*this->x(); const Scalar txy = ty*this->x(); const Scalar txz = tz*this->x(); const Scalar tyy = ty*this->y(); const Scalar tyz = tz*this->y(); const Scalar tzz = tz*this->z(); res.coeffRef(0,0) = Scalar(1)-(tyy+tzz); res.coeffRef(0,1) = txy-twz; res.coeffRef(0,2) = txz+twy; res.coeffRef(1,0) = txy+twz; res.coeffRef(1,1) = Scalar(1)-(txx+tzz); res.coeffRef(1,2) = tyz-twx; res.coeffRef(2,0) = txz-twy; res.coeffRef(2,1) = tyz+twx; res.coeffRef(2,2) = Scalar(1)-(txx+tyy); return res; } /** Sets \c *this to be a quaternion representing a rotation between * the two arbitrary vectors \a a and \a b. In other words, the built * rotation represent a rotation sending the line of direction \a a * to the line of direction \a b, both lines passing through the origin. * * \returns a reference to \c *this. * * Note that the two input vectors do \b not have to be normalized, and * do not need to have the same norm. */ template template inline Derived& QuaternionBase::setFromTwoVectors(const MatrixBase& a, const MatrixBase& b) { using std::max; using std::sqrt; Vector3 v0 = a.normalized(); Vector3 v1 = b.normalized(); Scalar c = v1.dot(v0); // if dot == -1, vectors are nearly opposites // => accurately compute the rotation axis by computing the // intersection of the two planes. 
This is done by solving: // x^T v0 = 0 // x^T v1 = 0 // under the constraint: // ||x|| = 1 // which yields a singular value problem if (c < Scalar(-1)+NumTraits::dummy_precision()) { c = (max)(c,Scalar(-1)); Matrix m; m << v0.transpose(), v1.transpose(); JacobiSVD > svd(m, ComputeFullV); Vector3 axis = svd.matrixV().col(2); Scalar w2 = (Scalar(1)+c)*Scalar(0.5); this->w() = sqrt(w2); this->vec() = axis * sqrt(Scalar(1) - w2); return derived(); } Vector3 axis = v0.cross(v1); Scalar s = sqrt((Scalar(1)+c)*Scalar(2)); Scalar invs = Scalar(1)/s; this->vec() = axis * invs; this->w() = s * Scalar(0.5); return derived(); } /** Returns a quaternion representing a rotation between * the two arbitrary vectors \a a and \a b. In other words, the built * rotation represent a rotation sending the line of direction \a a * to the line of direction \a b, both lines passing through the origin. * * \returns resulting quaternion * * Note that the two input vectors do \b not have to be normalized, and * do not need to have the same norm. */ template template Quaternion Quaternion::FromTwoVectors(const MatrixBase& a, const MatrixBase& b) { Quaternion quat; quat.setFromTwoVectors(a, b); return quat; } /** \returns the multiplicative inverse of \c *this * Note that in most cases, i.e., if you simply want the opposite rotation, * and/or the quaternion is normalized, then it is enough to use the conjugate. * * \sa QuaternionBase::conjugate() */ template inline Quaternion::Scalar> QuaternionBase::inverse() const { // FIXME should this function be called multiplicativeInverse and conjugate() be called inverse() or opposite() ?? Scalar n2 = this->squaredNorm(); if (n2 > Scalar(0)) return Quaternion(conjugate().coeffs() / n2); else { // return an invalid result to flag the error return Quaternion(Coefficients::Zero()); } } /** \returns the conjugate of the \c *this which is equal to the multiplicative inverse * if the quaternion is normalized. * The conjugate of a quaternion represents the opposite rotation. * * \sa Quaternion2::inverse() */ template inline Quaternion::Scalar> QuaternionBase::conjugate() const { return Quaternion(this->w(),-this->x(),-this->y(),-this->z()); } /** \returns the angle (in radian) between two rotations * \sa dot() */ template template inline typename internal::traits::Scalar QuaternionBase::angularDistance(const QuaternionBase& other) const { using std::atan2; using std::abs; Quaternion d = (*this) * other.conjugate(); return Scalar(2) * atan2( d.vec().norm(), abs(d.w()) ); } /** \returns the spherical linear interpolation between the two quaternions * \c *this and \a other at the parameter \a t in [0;1]. * * This represents an interpolation for a constant motion between \c *this and \a other, * see also http://en.wikipedia.org/wiki/Slerp. 
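// Editorial usage sketch for the FromTwoVectors(), conjugate() and
// angularDistance() members defined above; the inputs need not be normalized.
// Guard and function name are hypothetical.
#ifdef RCPPEIGEN_EDITOR_EXAMPLES
#include <Eigen/Geometry>
inline double example_from_two_vectors()
{
  Eigen::Vector3d a(1.0, 0.0, 0.0), b(0.0, 0.0, 2.0);       // arbitrary, non-unit vectors
  Eigen::Quaterniond q = Eigen::Quaterniond::FromTwoVectors(a, b);
  Eigen::Vector3d image = q * a.normalized();               // approximately b.normalized()
  Eigen::Quaterniond qc = q.conjugate();                    // opposite rotation (unit quaternion)
  double angle = q.angularDistance(qc);                     // angle between the two rotations
  return angle + image.z();                                 // combine results so nothing is unused
}
#endif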
*/ template template Quaternion::Scalar> QuaternionBase::slerp(const Scalar& t, const QuaternionBase& other) const { using std::acos; using std::sin; using std::abs; static const Scalar one = Scalar(1) - NumTraits::epsilon(); Scalar d = this->dot(other); Scalar absD = abs(d); Scalar scale0; Scalar scale1; if(absD>=one) { scale0 = Scalar(1) - t; scale1 = t; } else { // theta is the angle between the 2 quaternions Scalar theta = acos(absD); Scalar sinTheta = sin(theta); scale0 = sin( ( Scalar(1) - t ) * theta) / sinTheta; scale1 = sin( ( t * theta) ) / sinTheta; } if(d(scale0 * coeffs() + scale1 * other.coeffs()); } namespace internal { // set from a rotation matrix template struct quaternionbase_assign_impl { typedef typename Other::Scalar Scalar; typedef DenseIndex Index; template static inline void run(QuaternionBase& q, const Other& mat) { using std::sqrt; // This algorithm comes from "Quaternion Calculus and Fast Animation", // Ken Shoemake, 1987 SIGGRAPH course notes Scalar t = mat.trace(); if (t > Scalar(0)) { t = sqrt(t + Scalar(1.0)); q.w() = Scalar(0.5)*t; t = Scalar(0.5)/t; q.x() = (mat.coeff(2,1) - mat.coeff(1,2)) * t; q.y() = (mat.coeff(0,2) - mat.coeff(2,0)) * t; q.z() = (mat.coeff(1,0) - mat.coeff(0,1)) * t; } else { DenseIndex i = 0; if (mat.coeff(1,1) > mat.coeff(0,0)) i = 1; if (mat.coeff(2,2) > mat.coeff(i,i)) i = 2; DenseIndex j = (i+1)%3; DenseIndex k = (j+1)%3; t = sqrt(mat.coeff(i,i)-mat.coeff(j,j)-mat.coeff(k,k) + Scalar(1.0)); q.coeffs().coeffRef(i) = Scalar(0.5) * t; t = Scalar(0.5)/t; q.w() = (mat.coeff(k,j)-mat.coeff(j,k))*t; q.coeffs().coeffRef(j) = (mat.coeff(j,i)+mat.coeff(i,j))*t; q.coeffs().coeffRef(k) = (mat.coeff(k,i)+mat.coeff(i,k))*t; } } }; // set from a vector of coefficients assumed to be a quaternion template struct quaternionbase_assign_impl { typedef typename Other::Scalar Scalar; template static inline void run(QuaternionBase& q, const Other& vec) { q.coeffs() = vec; } }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_QUATERNION_H RcppEigen/inst/include/Eigen/src/Geometry/Umeyama.h0000644000176200001440000001512212375356050021715 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Hauke Heibel // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_UMEYAMA_H #define EIGEN_UMEYAMA_H // This file requires the user to include // * Eigen/Core // * Eigen/LU // * Eigen/SVD // * Eigen/Array namespace Eigen { #ifndef EIGEN_PARSED_BY_DOXYGEN // These helpers are required since it allows to use mixed types as parameters // for the Umeyama. The problem with mixed parameters is that the return type // cannot trivially be deduced when float and double types are mixed. namespace internal { // Compile time return type deduction for different MatrixBase types. // Different means here different alignment and parameters but the same underlying // real scalar type. template struct umeyama_transform_matrix_type { enum { MinRowsAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(MatrixType::RowsAtCompileTime, OtherMatrixType::RowsAtCompileTime), // When possible we want to choose some small fixed size value since the result // is likely to fit on the stack. So here, EIGEN_SIZE_MIN_PREFER_DYNAMIC is not what we want. HomogeneousDimension = int(MinRowsAtCompileTime) == Dynamic ? 
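// Editorial usage sketch for QuaternionBase::slerp() implemented above:
// constant-speed interpolation between two unit quaternions. Guard and
// function name are hypothetical and not part of Eigen.
#ifdef RCPPEIGEN_EDITOR_EXAMPLES
#include <Eigen/Geometry>
inline Eigen::Quaterniond example_slerp_halfway()
{
  Eigen::Quaterniond q0(Eigen::AngleAxisd(0.0, Eigen::Vector3d::UnitZ()));
  Eigen::Quaterniond q1(Eigen::AngleAxisd(1.0, Eigen::Vector3d::UnitZ()));
  // t is taken in [0;1]; t = 0.5 yields a rotation of about 0.5 radian
  // about the z axis for this pair of inputs.
  return q0.slerp(0.5, q1);
}
#endif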
Dynamic : int(MinRowsAtCompileTime)+1 }; typedef Matrix::Scalar, HomogeneousDimension, HomogeneousDimension, AutoAlign | (traits::Flags & RowMajorBit ? RowMajor : ColMajor), HomogeneousDimension, HomogeneousDimension > type; }; } #endif /** * \geometry_module \ingroup Geometry_Module * * \brief Returns the transformation between two point sets. * * The algorithm is based on: * "Least-squares estimation of transformation parameters between two point patterns", * Shinji Umeyama, PAMI 1991, DOI: 10.1109/34.88573 * * It estimates parameters \f$ c, \mathbf{R}, \f$ and \f$ \mathbf{t} \f$ such that * \f{align*} * \frac{1}{n} \sum_{i=1}^n \vert\vert y_i - (c\mathbf{R}x_i + \mathbf{t}) \vert\vert_2^2 * \f} * is minimized. * * The algorithm is based on the analysis of the covariance matrix * \f$ \Sigma_{\mathbf{x}\mathbf{y}} \in \mathbb{R}^{d \times d} \f$ * of the input point sets \f$ \mathbf{x} \f$ and \f$ \mathbf{y} \f$ where * \f$d\f$ is corresponding to the dimension (which is typically small). * The analysis is involving the SVD having a complexity of \f$O(d^3)\f$ * though the actual computational effort lies in the covariance * matrix computation which has an asymptotic lower bound of \f$O(dm)\f$ when * the input point sets have dimension \f$d \times m\f$. * * Currently the method is working only for floating point matrices. * * \todo Should the return type of umeyama() become a Transform? * * \param src Source points \f$ \mathbf{x} = \left( x_1, \hdots, x_n \right) \f$. * \param dst Destination points \f$ \mathbf{y} = \left( y_1, \hdots, y_n \right) \f$. * \param with_scaling Sets \f$ c=1 \f$ when false is passed. * \return The homogeneous transformation * \f{align*} * T = \begin{bmatrix} c\mathbf{R} & \mathbf{t} \\ \mathbf{0} & 1 \end{bmatrix} * \f} * minimizing the resudiual above. This transformation is always returned as an * Eigen::Matrix. */ template typename internal::umeyama_transform_matrix_type::type umeyama(const MatrixBase& src, const MatrixBase& dst, bool with_scaling = true) { typedef typename internal::umeyama_transform_matrix_type::type TransformationMatrixType; typedef typename internal::traits::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef typename Derived::Index Index; EIGEN_STATIC_ASSERT(!NumTraits::IsComplex, NUMERIC_TYPE_MUST_BE_REAL) EIGEN_STATIC_ASSERT((internal::is_same::Scalar>::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) enum { Dimension = EIGEN_SIZE_MIN_PREFER_DYNAMIC(Derived::RowsAtCompileTime, OtherDerived::RowsAtCompileTime) }; typedef Matrix VectorType; typedef Matrix MatrixType; typedef typename internal::plain_matrix_type_row_major::type RowMajorMatrixType; const Index m = src.rows(); // dimension const Index n = src.cols(); // number of measurements // required for demeaning ... const RealScalar one_over_n = RealScalar(1) / static_cast(n); // computation of mean const VectorType src_mean = src.rowwise().sum() * one_over_n; const VectorType dst_mean = dst.rowwise().sum() * one_over_n; // demeaning of src and dst points const RowMajorMatrixType src_demean = src.colwise() - src_mean; const RowMajorMatrixType dst_demean = dst.colwise() - dst_mean; // Eq. (36)-(37) const Scalar src_var = src_demean.rowwise().squaredNorm().sum() * one_over_n; // Eq. (38) const MatrixType sigma = one_over_n * dst_demean * src_demean.transpose(); JacobiSVD svd(sigma, ComputeFullU | ComputeFullV); // Initialize the resulting transformation with an identity matrix... 
TransformationMatrixType Rt = TransformationMatrixType::Identity(m+1,m+1); // Eq. (39) VectorType S = VectorType::Ones(m); if (sigma.determinant() Scalar(0) ) { Rt.block(0,0,m,m).noalias() = svd.matrixU()*svd.matrixV().transpose(); } else { const Scalar s = S(m-1); S(m-1) = Scalar(-1); Rt.block(0,0,m,m).noalias() = svd.matrixU() * S.asDiagonal() * svd.matrixV().transpose(); S(m-1) = s; } } else { Rt.block(0,0,m,m).noalias() = svd.matrixU() * S.asDiagonal() * svd.matrixV().transpose(); } if (with_scaling) { // Eq. (42) const Scalar c = Scalar(1)/src_var * svd.singularValues().dot(S); // Eq. (41) Rt.col(m).head(m) = dst_mean; Rt.col(m).head(m).noalias() -= c*Rt.topLeftCorner(m,m)*src_mean; Rt.block(0,0,m,m) *= c; } else { Rt.col(m).head(m) = dst_mean; Rt.col(m).head(m).noalias() -= Rt.topLeftCorner(m,m)*src_mean; } return Rt; } } // end namespace Eigen #endif // EIGEN_UMEYAMA_H RcppEigen/inst/include/Eigen/src/Geometry/Scaling.h0000644000176200001440000001422212253717461021701 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SCALING_H #define EIGEN_SCALING_H namespace Eigen { /** \geometry_module \ingroup Geometry_Module * * \class Scaling * * \brief Represents a generic uniform scaling transformation * * \param _Scalar the scalar type, i.e., the type of the coefficients. * * This class represent a uniform scaling transformation. It is the return * type of Scaling(Scalar), and most of the time this is the only way it * is used. In particular, this class is not aimed to be used to store a scaling transformation, * but rather to make easier the constructions and updates of Transform objects. * * To represent an axis aligned scaling, use the DiagonalMatrix class. * * \sa Scaling(), class DiagonalMatrix, MatrixBase::asDiagonal(), class Translation, class Transform */ template class UniformScaling { public: /** the scalar type of the coefficients */ typedef _Scalar Scalar; protected: Scalar m_factor; public: /** Default constructor without initialization. 
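// Editorial usage sketch for Eigen::umeyama() defined above: recover the
// similarity transformation (rotation, translation and uniform scale) mapping
// a 3 x n source point set onto a destination point set. Guard and function
// name are hypothetical.
#ifdef RCPPEIGEN_EDITOR_EXAMPLES
#include <Eigen/Dense>     // Core, SVD and LU, as required by this header
#include <Eigen/Geometry>
inline Eigen::Matrix4d example_umeyama_alignment()
{
  Eigen::Matrix3Xd src = Eigen::Matrix3Xd::Random(3, 20);                 // source points
  Eigen::Matrix3d R = Eigen::AngleAxisd(0.4, Eigen::Vector3d::UnitZ()).toRotationMatrix();
  Eigen::Vector3d t(1.0, 2.0, 3.0);
  Eigen::Matrix3Xd dst = (2.0 * R * src).colwise() + t;                   // scaled, rotated, translated copy
  // Returns the homogeneous matrix [c*R t; 0 1] minimizing the residual:
  return Eigen::umeyama(src, dst, true);
}
#endif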
*/ UniformScaling() {} /** Constructs and initialize a uniform scaling transformation */ explicit inline UniformScaling(const Scalar& s) : m_factor(s) {} inline const Scalar& factor() const { return m_factor; } inline Scalar& factor() { return m_factor; } /** Concatenates two uniform scaling */ inline UniformScaling operator* (const UniformScaling& other) const { return UniformScaling(m_factor * other.factor()); } /** Concatenates a uniform scaling and a translation */ template inline Transform operator* (const Translation& t) const; /** Concatenates a uniform scaling and an affine transformation */ template inline Transform operator* (const Transform& t) const { Transform res = t; res.prescale(factor()); return res; } /** Concatenates a uniform scaling and a linear transformation matrix */ // TODO returns an expression template inline typename internal::plain_matrix_type::type operator* (const MatrixBase& other) const { return other * m_factor; } template inline Matrix operator*(const RotationBase& r) const { return r.toRotationMatrix() * m_factor; } /** \returns the inverse scaling */ inline UniformScaling inverse() const { return UniformScaling(Scalar(1)/m_factor); } /** \returns \c *this with scalar type casted to \a NewScalarType * * Note that if \a NewScalarType is equal to the current scalar type of \c *this * then this function smartly returns a const reference to \c *this. */ template inline UniformScaling cast() const { return UniformScaling(NewScalarType(m_factor)); } /** Copy constructor with scalar type conversion */ template inline explicit UniformScaling(const UniformScaling& other) { m_factor = Scalar(other.factor()); } /** \returns \c true if \c *this is approximately equal to \a other, within the precision * determined by \a prec. 
* * \sa MatrixBase::isApprox() */ bool isApprox(const UniformScaling& other, const typename NumTraits::Real& prec = NumTraits::dummy_precision()) const { return internal::isApprox(m_factor, other.factor(), prec); } }; /** Concatenates a linear transformation matrix and a uniform scaling */ // NOTE this operator is defiend in MatrixBase and not as a friend function // of UniformScaling to fix an internal crash of Intel's ICC template typename MatrixBase::ScalarMultipleReturnType MatrixBase::operator*(const UniformScaling& s) const { return derived() * s.factor(); } /** Constructs a uniform scaling from scale factor \a s */ static inline UniformScaling Scaling(float s) { return UniformScaling(s); } /** Constructs a uniform scaling from scale factor \a s */ static inline UniformScaling Scaling(double s) { return UniformScaling(s); } /** Constructs a uniform scaling from scale factor \a s */ template static inline UniformScaling > Scaling(const std::complex& s) { return UniformScaling >(s); } /** Constructs a 2D axis aligned scaling */ template static inline DiagonalMatrix Scaling(const Scalar& sx, const Scalar& sy) { return DiagonalMatrix(sx, sy); } /** Constructs a 3D axis aligned scaling */ template static inline DiagonalMatrix Scaling(const Scalar& sx, const Scalar& sy, const Scalar& sz) { return DiagonalMatrix(sx, sy, sz); } /** Constructs an axis aligned scaling expression from vector expression \a coeffs * This is an alias for coeffs.asDiagonal() */ template static inline const DiagonalWrapper Scaling(const MatrixBase& coeffs) { return coeffs.asDiagonal(); } /** \addtogroup Geometry_Module */ //@{ /** \deprecated */ typedef DiagonalMatrix AlignedScaling2f; /** \deprecated */ typedef DiagonalMatrix AlignedScaling2d; /** \deprecated */ typedef DiagonalMatrix AlignedScaling3f; /** \deprecated */ typedef DiagonalMatrix AlignedScaling3d; //@} template template inline Transform UniformScaling::operator* (const Translation& t) const { Transform res; res.matrix().setZero(); res.linear().diagonal().fill(factor()); res.translation() = factor() * t.vector(); res(Dim,Dim) = Scalar(1); return res; } } // end namespace Eigen #endif // EIGEN_SCALING_H RcppEigen/inst/include/Eigen/src/Geometry/ParametrizedLine.h0000644000176200001440000001713012253717461023561 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // Copyright (C) 2008 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_PARAMETRIZEDLINE_H #define EIGEN_PARAMETRIZEDLINE_H namespace Eigen { /** \geometry_module \ingroup Geometry_Module * * \class ParametrizedLine * * \brief A parametrized line * * A parametrized line is defined by an origin point \f$ \mathbf{o} \f$ and a unit * direction vector \f$ \mathbf{d} \f$ such that the line corresponds to * the set \f$ l(t) = \mathbf{o} + t \mathbf{d} \f$, \f$ t \in \mathbf{R} \f$. * * \param _Scalar the scalar type, i.e., the type of the coefficients * \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic. 
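// Editorial usage sketch for the Scaling() helpers and the UniformScaling
// class above: uniform scaling composes with a rotation into a plain linear
// matrix, while axis-aligned scaling is returned as a DiagonalMatrix. Guard
// and function name are hypothetical.
#ifdef RCPPEIGEN_EDITOR_EXAMPLES
#include <Eigen/Geometry>
inline Eigen::Vector3d example_scalings()
{
  // uniform scaling times a rotation yields a 3x3 matrix:
  Eigen::Matrix3d M = Eigen::Scaling(2.0) * Eigen::AngleAxisd(0.3, Eigen::Vector3d::UnitZ());
  // axis-aligned scaling, stored as a diagonal matrix:
  Eigen::DiagonalMatrix<double, 3> S = Eigen::Scaling(1.0, 2.0, 3.0);
  return M * (S * Eigen::Vector3d::Ones());    // apply both scalings to a vector
}
#endif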
*/ template class ParametrizedLine { public: EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) enum { AmbientDimAtCompileTime = _AmbientDim, Options = _Options }; typedef _Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef DenseIndex Index; typedef Matrix VectorType; /** Default constructor without initialization */ inline ParametrizedLine() {} template ParametrizedLine(const ParametrizedLine& other) : m_origin(other.origin()), m_direction(other.direction()) {} /** Constructs a dynamic-size line with \a _dim the dimension * of the ambient space */ inline explicit ParametrizedLine(Index _dim) : m_origin(_dim), m_direction(_dim) {} /** Initializes a parametrized line of direction \a direction and origin \a origin. * \warning the vector direction is assumed to be normalized. */ ParametrizedLine(const VectorType& origin, const VectorType& direction) : m_origin(origin), m_direction(direction) {} template explicit ParametrizedLine(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane); /** Constructs a parametrized line going from \a p0 to \a p1. */ static inline ParametrizedLine Through(const VectorType& p0, const VectorType& p1) { return ParametrizedLine(p0, (p1-p0).normalized()); } ~ParametrizedLine() {} /** \returns the dimension in which the line holds */ inline Index dim() const { return m_direction.size(); } const VectorType& origin() const { return m_origin; } VectorType& origin() { return m_origin; } const VectorType& direction() const { return m_direction; } VectorType& direction() { return m_direction; } /** \returns the squared distance of a point \a p to its projection onto the line \c *this. * \sa distance() */ RealScalar squaredDistance(const VectorType& p) const { VectorType diff = p - origin(); return (diff - direction().dot(diff) * direction()).squaredNorm(); } /** \returns the distance of a point \a p to its projection onto the line \c *this. * \sa squaredDistance() */ RealScalar distance(const VectorType& p) const { using std::sqrt; return sqrt(squaredDistance(p)); } /** \returns the projection of a point \a p onto the line \c *this. */ VectorType projection(const VectorType& p) const { return origin() + direction().dot(p-origin()) * direction(); } VectorType pointAt(const Scalar& t) const; template Scalar intersectionParameter(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const; template Scalar intersection(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const; template VectorType intersectionPoint(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const; /** \returns \c *this with scalar type casted to \a NewScalarType * * Note that if \a NewScalarType is equal to the current scalar type of \c *this * then this function smartly returns a const reference to \c *this. */ template inline typename internal::cast_return_type >::type cast() const { return typename internal::cast_return_type >::type(*this); } /** Copy constructor with scalar type conversion */ template inline explicit ParametrizedLine(const ParametrizedLine& other) { m_origin = other.origin().template cast(); m_direction = other.direction().template cast(); } /** \returns \c true if \c *this is approximately equal to \a other, within the precision * determined by \a prec. 
* * \sa MatrixBase::isApprox() */ bool isApprox(const ParametrizedLine& other, typename NumTraits::Real prec = NumTraits::dummy_precision()) const { return m_origin.isApprox(other.m_origin, prec) && m_direction.isApprox(other.m_direction, prec); } protected: VectorType m_origin, m_direction; }; /** Constructs a parametrized line from a 2D hyperplane * * \warning the ambient space must have dimension 2 such that the hyperplane actually describes a line */ template template inline ParametrizedLine<_Scalar, _AmbientDim,_Options>::ParametrizedLine(const Hyperplane<_Scalar, _AmbientDim,OtherOptions>& hyperplane) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 2) direction() = hyperplane.normal().unitOrthogonal(); origin() = -hyperplane.normal()*hyperplane.offset(); } /** \returns the point at \a t along this line */ template inline typename ParametrizedLine<_Scalar, _AmbientDim,_Options>::VectorType ParametrizedLine<_Scalar, _AmbientDim,_Options>::pointAt(const _Scalar& t) const { return origin() + (direction()*t); } /** \returns the parameter value of the intersection between \c *this and the given \a hyperplane */ template template inline _Scalar ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersectionParameter(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const { return -(hyperplane.offset()+hyperplane.normal().dot(origin())) / hyperplane.normal().dot(direction()); } /** \deprecated use intersectionParameter() * \returns the parameter value of the intersection between \c *this and the given \a hyperplane */ template template inline _Scalar ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersection(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const { return intersectionParameter(hyperplane); } /** \returns the point of the intersection between \c *this and the given hyperplane */ template template inline typename ParametrizedLine<_Scalar, _AmbientDim,_Options>::VectorType ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersectionPoint(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const { return pointAt(intersectionParameter(hyperplane)); } } // end namespace Eigen #endif // EIGEN_PARAMETRIZEDLINE_H RcppEigen/inst/include/Eigen/src/Geometry/Homogeneous.h0000644000176200001440000002661712550462273022622 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009-2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_HOMOGENEOUS_H #define EIGEN_HOMOGENEOUS_H namespace Eigen { /** \geometry_module \ingroup Geometry_Module * * \class Homogeneous * * \brief Expression of one (or a set of) homogeneous vector(s) * * \param MatrixType the type of the object in which we are making homogeneous * * This class represents an expression of one (or a set of) homogeneous vector(s). * It is the return type of MatrixBase::homogeneous() and most of the time * this is the only way it is used. * * \sa MatrixBase::homogeneous() */ namespace internal { template struct traits > : traits { typedef typename traits::StorageKind StorageKind; typedef typename nested::type MatrixTypeNested; typedef typename remove_reference::type _MatrixTypeNested; enum { RowsPlusOne = (MatrixType::RowsAtCompileTime != Dynamic) ? 
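// Editorial usage sketch for the ParametrizedLine class defined above:
// distance and projection of a point onto the line through two points. Guard
// and function name are hypothetical.
#ifdef RCPPEIGEN_EDITOR_EXAMPLES
#include <Eigen/Geometry>
inline double example_parametrized_line()
{
  typedef Eigen::ParametrizedLine<double, 3> Line3;
  Eigen::Vector3d p0(0.0, 0.0, 0.0), p1(1.0, 1.0, 0.0), p(1.0, 0.0, 0.0);
  Line3 line = Line3::Through(p0, p1);               // the direction is normalized internally
  double d = line.distance(p);                       // orthogonal distance of p to the line
  Eigen::Vector3d foot = line.projection(p);         // closest point on the line to p
  Eigen::Vector3d q = line.pointAt(2.0);             // origin + 2 * direction
  return d + foot.norm() + q.norm();                 // combine results so nothing is unused
}
#endif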
int(MatrixType::RowsAtCompileTime) + 1 : Dynamic, ColsPlusOne = (MatrixType::ColsAtCompileTime != Dynamic) ? int(MatrixType::ColsAtCompileTime) + 1 : Dynamic, RowsAtCompileTime = Direction==Vertical ? RowsPlusOne : MatrixType::RowsAtCompileTime, ColsAtCompileTime = Direction==Horizontal ? ColsPlusOne : MatrixType::ColsAtCompileTime, MaxRowsAtCompileTime = RowsAtCompileTime, MaxColsAtCompileTime = ColsAtCompileTime, TmpFlags = _MatrixTypeNested::Flags & HereditaryBits, Flags = ColsAtCompileTime==1 ? (TmpFlags & ~RowMajorBit) : RowsAtCompileTime==1 ? (TmpFlags | RowMajorBit) : TmpFlags, CoeffReadCost = _MatrixTypeNested::CoeffReadCost }; }; template struct homogeneous_left_product_impl; template struct homogeneous_right_product_impl; } // end namespace internal template class Homogeneous : internal::no_assignment_operator, public MatrixBase > { public: enum { Direction = _Direction }; typedef MatrixBase Base; EIGEN_DENSE_PUBLIC_INTERFACE(Homogeneous) inline Homogeneous(const MatrixType& matrix) : m_matrix(matrix) {} inline Index rows() const { return m_matrix.rows() + (int(Direction)==Vertical ? 1 : 0); } inline Index cols() const { return m_matrix.cols() + (int(Direction)==Horizontal ? 1 : 0); } inline Scalar coeff(Index row, Index col) const { if( (int(Direction)==Vertical && row==m_matrix.rows()) || (int(Direction)==Horizontal && col==m_matrix.cols())) return Scalar(1); return m_matrix.coeff(row, col); } template inline const internal::homogeneous_right_product_impl operator* (const MatrixBase& rhs) const { eigen_assert(int(Direction)==Horizontal); return internal::homogeneous_right_product_impl(m_matrix,rhs.derived()); } template friend inline const internal::homogeneous_left_product_impl operator* (const MatrixBase& lhs, const Homogeneous& rhs) { eigen_assert(int(Direction)==Vertical); return internal::homogeneous_left_product_impl(lhs.derived(),rhs.m_matrix); } template friend inline const internal::homogeneous_left_product_impl > operator* (const Transform& lhs, const Homogeneous& rhs) { eigen_assert(int(Direction)==Vertical); return internal::homogeneous_left_product_impl >(lhs,rhs.m_matrix); } protected: typename MatrixType::Nested m_matrix; }; /** \geometry_module * * \return an expression of the equivalent homogeneous vector * * \only_for_vectors * * Example: \include MatrixBase_homogeneous.cpp * Output: \verbinclude MatrixBase_homogeneous.out * * \sa class Homogeneous */ template inline typename MatrixBase::HomogeneousReturnType MatrixBase::homogeneous() const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived); return derived(); } /** \geometry_module * * \returns a matrix expression of homogeneous column (or row) vectors * * Example: \include VectorwiseOp_homogeneous.cpp * Output: \verbinclude VectorwiseOp_homogeneous.out * * \sa MatrixBase::homogeneous() */ template inline Homogeneous VectorwiseOp::homogeneous() const { return _expression(); } /** \geometry_module * * \returns an expression of the homogeneous normalized vector of \c *this * * Example: \include MatrixBase_hnormalized.cpp * Output: \verbinclude MatrixBase_hnormalized.out * * \sa VectorwiseOp::hnormalized() */ template inline const typename MatrixBase::HNormalizedReturnType MatrixBase::hnormalized() const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived); return ConstStartMinusOne(derived(),0,0, ColsAtCompileTime==1?size()-1:1, ColsAtCompileTime==1?1:size()-1) / coeff(size()-1); } /** \geometry_module * * \returns an expression of the homogeneous normalized vector of \c *this * * Example: \include DirectionWise_hnormalized.cpp * 
Output: \verbinclude DirectionWise_hnormalized.out * * \sa MatrixBase::hnormalized() */ template inline const typename VectorwiseOp::HNormalizedReturnType VectorwiseOp::hnormalized() const { return HNormalized_Block(_expression(),0,0, Direction==Vertical ? _expression().rows()-1 : _expression().rows(), Direction==Horizontal ? _expression().cols()-1 : _expression().cols()).cwiseQuotient( Replicate (HNormalized_Factors(_expression(), Direction==Vertical ? _expression().rows()-1:0, Direction==Horizontal ? _expression().cols()-1:0, Direction==Vertical ? 1 : _expression().rows(), Direction==Horizontal ? 1 : _expression().cols()), Direction==Vertical ? _expression().rows()-1 : 1, Direction==Horizontal ? _expression().cols()-1 : 1)); } namespace internal { template struct take_matrix_for_product { typedef MatrixOrTransformType type; static const type& run(const type &x) { return x; } }; template struct take_matrix_for_product > { typedef Transform TransformType; typedef typename internal::add_const::type type; static type run (const TransformType& x) { return x.affine(); } }; template struct take_matrix_for_product > { typedef Transform TransformType; typedef typename TransformType::MatrixType type; static const type& run (const TransformType& x) { return x.matrix(); } }; template struct traits,Lhs> > { typedef typename take_matrix_for_product::type LhsMatrixType; typedef typename remove_all::type MatrixTypeCleaned; typedef typename remove_all::type LhsMatrixTypeCleaned; typedef typename make_proper_matrix_type< typename traits::Scalar, LhsMatrixTypeCleaned::RowsAtCompileTime, MatrixTypeCleaned::ColsAtCompileTime, MatrixTypeCleaned::PlainObject::Options, LhsMatrixTypeCleaned::MaxRowsAtCompileTime, MatrixTypeCleaned::MaxColsAtCompileTime>::type ReturnType; }; template struct homogeneous_left_product_impl,Lhs> : public ReturnByValue,Lhs> > { typedef typename traits::LhsMatrixType LhsMatrixType; typedef typename remove_all::type LhsMatrixTypeCleaned; typedef typename remove_all::type LhsMatrixTypeNested; typedef typename MatrixType::Index Index; homogeneous_left_product_impl(const Lhs& lhs, const MatrixType& rhs) : m_lhs(take_matrix_for_product::run(lhs)), m_rhs(rhs) {} inline Index rows() const { return m_lhs.rows(); } inline Index cols() const { return m_rhs.cols(); } template void evalTo(Dest& dst) const { // FIXME investigate how to allow lazy evaluation of this product when possible dst = Block (m_lhs,0,0,m_lhs.rows(),m_lhs.cols()-1) * m_rhs; dst += m_lhs.col(m_lhs.cols()-1).rowwise() .template replicate(m_rhs.cols()); } typename LhsMatrixTypeCleaned::Nested m_lhs; typename MatrixType::Nested m_rhs; }; template struct traits,Rhs> > { typedef typename make_proper_matrix_type::Scalar, MatrixType::RowsAtCompileTime, Rhs::ColsAtCompileTime, MatrixType::PlainObject::Options, MatrixType::MaxRowsAtCompileTime, Rhs::MaxColsAtCompileTime>::type ReturnType; }; template struct homogeneous_right_product_impl,Rhs> : public ReturnByValue,Rhs> > { typedef typename remove_all::type RhsNested; typedef typename MatrixType::Index Index; homogeneous_right_product_impl(const MatrixType& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs) {} inline Index rows() const { return m_lhs.rows(); } inline Index cols() const { return m_rhs.cols(); } template void evalTo(Dest& dst) const { // FIXME investigate how to allow lazy evaluation of this product when possible dst = m_lhs * Block (m_rhs,0,0,m_rhs.rows()-1,m_rhs.cols()); dst += m_rhs.row(m_rhs.rows()-1).colwise() .template replicate(m_lhs.rows()); } typename 
MatrixType::Nested m_lhs; typename Rhs::Nested m_rhs; }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_HOMOGENEOUS_H RcppEigen/inst/include/Eigen/src/Geometry/arch/0000755000176200001440000000000012253717461021064 5ustar liggesusersRcppEigen/inst/include/Eigen/src/Geometry/arch/Geometry_SSE.h0000644000176200001440000000745012253717461023550 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Rohit Garg // Copyright (C) 2009-2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_GEOMETRY_SSE_H #define EIGEN_GEOMETRY_SSE_H namespace Eigen { namespace internal { template struct quat_product { static inline Quaternion run(const QuaternionBase& _a, const QuaternionBase& _b) { const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0,0,0,0x80000000)); Quaternion res; __m128 a = _a.coeffs().template packet(0); __m128 b = _b.coeffs().template packet(0); __m128 flip1 = _mm_xor_ps(_mm_mul_ps(vec4f_swizzle1(a,1,2,0,2), vec4f_swizzle1(b,2,0,1,2)),mask); __m128 flip2 = _mm_xor_ps(_mm_mul_ps(vec4f_swizzle1(a,3,3,3,1), vec4f_swizzle1(b,0,1,2,1)),mask); pstore(&res.x(), _mm_add_ps(_mm_sub_ps(_mm_mul_ps(a,vec4f_swizzle1(b,3,3,3,3)), _mm_mul_ps(vec4f_swizzle1(a,2,0,1,0), vec4f_swizzle1(b,1,2,0,0))), _mm_add_ps(flip1,flip2))); return res; } }; template struct cross3_impl { static inline typename plain_matrix_type::type run(const VectorLhs& lhs, const VectorRhs& rhs) { __m128 a = lhs.template packet(0); __m128 b = rhs.template packet(0); __m128 mul1=_mm_mul_ps(vec4f_swizzle1(a,1,2,0,3),vec4f_swizzle1(b,2,0,1,3)); __m128 mul2=_mm_mul_ps(vec4f_swizzle1(a,2,0,1,3),vec4f_swizzle1(b,1,2,0,3)); typename plain_matrix_type::type res; pstore(&res.x(),_mm_sub_ps(mul1,mul2)); return res; } }; template struct quat_product { static inline Quaternion run(const QuaternionBase& _a, const QuaternionBase& _b) { const Packet2d mask = _mm_castsi128_pd(_mm_set_epi32(0x0,0x0,0x80000000,0x0)); Quaternion res; const double* a = _a.coeffs().data(); Packet2d b_xy = _b.coeffs().template packet(0); Packet2d b_zw = _b.coeffs().template packet(2); Packet2d a_xx = pset1(a[0]); Packet2d a_yy = pset1(a[1]); Packet2d a_zz = pset1(a[2]); Packet2d a_ww = pset1(a[3]); // two temporaries: Packet2d t1, t2; /* * t1 = ww*xy + yy*zw * t2 = zz*xy - xx*zw * res.xy = t1 +/- swap(t2) */ t1 = padd(pmul(a_ww, b_xy), pmul(a_yy, b_zw)); t2 = psub(pmul(a_zz, b_xy), pmul(a_xx, b_zw)); #ifdef EIGEN_VECTORIZE_SSE3 EIGEN_UNUSED_VARIABLE(mask) pstore(&res.x(), _mm_addsub_pd(t1, preverse(t2))); #else pstore(&res.x(), padd(t1, pxor(mask,preverse(t2)))); #endif /* * t1 = ww*zw - yy*xy * t2 = zz*zw + xx*xy * res.zw = t1 -/+ swap(t2) = swap( swap(t1) +/- t2) */ t1 = psub(pmul(a_ww, b_zw), pmul(a_yy, b_xy)); t2 = padd(pmul(a_zz, b_zw), pmul(a_xx, b_xy)); #ifdef EIGEN_VECTORIZE_SSE3 EIGEN_UNUSED_VARIABLE(mask) pstore(&res.z(), preverse(_mm_addsub_pd(preverse(t1), t2))); #else pstore(&res.z(), psub(t1, pxor(mask,preverse(t2)))); #endif return res; } }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_GEOMETRY_SSE_H RcppEigen/inst/include/Eigen/src/Geometry/OrthoMethods.h0000644000176200001440000002017612253717461022745 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. 
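// Editorial usage sketch for homogeneous() and hnormalized() from
// Homogeneous.h above: promote vectors to homogeneous coordinates, apply a
// projective map, then normalize back. Guard and function name are
// hypothetical.
#ifdef RCPPEIGEN_EDITOR_EXAMPLES
#include <Eigen/Geometry>
inline Eigen::Matrix3Xd example_homogeneous_roundtrip()
{
  Eigen::Vector3d v(1.0, 2.0, 3.0);
  Eigen::Vector4d h = v.homogeneous();                    // (1, 2, 3, 1)
  Eigen::Vector3d back = h.hnormalized();                 // divide by the last coefficient
  Eigen::Matrix3Xd pts = Eigen::Matrix3Xd::Random(3, 5);
  Eigen::Matrix4d P = Eigen::Matrix4d::Random();          // some projective transformation
  Eigen::Matrix4Xd hpts = P * pts.colwise().homogeneous();// promote and map every column
  Eigen::Matrix3Xd mapped = hpts.colwise().hnormalized(); // back to 3 x n coordinates
  mapped.col(0) += back;                                  // keep the single-vector result in use
  return mapped;
}
#endif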
// // Copyright (C) 2008-2009 Gael Guennebaud // Copyright (C) 2006-2008 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ORTHOMETHODS_H #define EIGEN_ORTHOMETHODS_H namespace Eigen { /** \geometry_module * * \returns the cross product of \c *this and \a other * * Here is a very good explanation of cross-product: http://xkcd.com/199/ * \sa MatrixBase::cross3() */ template template inline typename MatrixBase::template cross_product_return_type::type MatrixBase::cross(const MatrixBase& other) const { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived,3) EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,3) // Note that there is no need for an expression here since the compiler // optimize such a small temporary very well (even within a complex expression) typename internal::nested::type lhs(derived()); typename internal::nested::type rhs(other.derived()); return typename cross_product_return_type::type( numext::conj(lhs.coeff(1) * rhs.coeff(2) - lhs.coeff(2) * rhs.coeff(1)), numext::conj(lhs.coeff(2) * rhs.coeff(0) - lhs.coeff(0) * rhs.coeff(2)), numext::conj(lhs.coeff(0) * rhs.coeff(1) - lhs.coeff(1) * rhs.coeff(0)) ); } namespace internal { template< int Arch,typename VectorLhs,typename VectorRhs, typename Scalar = typename VectorLhs::Scalar, bool Vectorizable = bool((VectorLhs::Flags&VectorRhs::Flags)&PacketAccessBit)> struct cross3_impl { static inline typename internal::plain_matrix_type::type run(const VectorLhs& lhs, const VectorRhs& rhs) { return typename internal::plain_matrix_type::type( numext::conj(lhs.coeff(1) * rhs.coeff(2) - lhs.coeff(2) * rhs.coeff(1)), numext::conj(lhs.coeff(2) * rhs.coeff(0) - lhs.coeff(0) * rhs.coeff(2)), numext::conj(lhs.coeff(0) * rhs.coeff(1) - lhs.coeff(1) * rhs.coeff(0)), 0 ); } }; } /** \geometry_module * * \returns the cross product of \c *this and \a other using only the x, y, and z coefficients * * The size of \c *this and \a other must be four. This function is especially useful * when using 4D vectors instead of 3D ones to get advantage of SSE/AltiVec vectorization. * * \sa MatrixBase::cross() */ template template inline typename MatrixBase::PlainObject MatrixBase::cross3(const MatrixBase& other) const { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived,4) EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,4) typedef typename internal::nested::type DerivedNested; typedef typename internal::nested::type OtherDerivedNested; DerivedNested lhs(derived()); OtherDerivedNested rhs(other.derived()); return internal::cross3_impl::type, typename internal::remove_all::type>::run(lhs,rhs); } /** \returns a matrix expression of the cross product of each column or row * of the referenced expression with the \a other vector. * * The referenced matrix must have one dimension equal to 3. * The result matrix has the same dimensions than the referenced one. 
* * \geometry_module * * \sa MatrixBase::cross() */ template template const typename VectorwiseOp::CrossReturnType VectorwiseOp::cross(const MatrixBase& other) const { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,3) EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) CrossReturnType res(_expression().rows(),_expression().cols()); if(Direction==Vertical) { eigen_assert(CrossReturnType::RowsAtCompileTime==3 && "the matrix must have exactly 3 rows"); res.row(0) = (_expression().row(1) * other.coeff(2) - _expression().row(2) * other.coeff(1)).conjugate(); res.row(1) = (_expression().row(2) * other.coeff(0) - _expression().row(0) * other.coeff(2)).conjugate(); res.row(2) = (_expression().row(0) * other.coeff(1) - _expression().row(1) * other.coeff(0)).conjugate(); } else { eigen_assert(CrossReturnType::ColsAtCompileTime==3 && "the matrix must have exactly 3 columns"); res.col(0) = (_expression().col(1) * other.coeff(2) - _expression().col(2) * other.coeff(1)).conjugate(); res.col(1) = (_expression().col(2) * other.coeff(0) - _expression().col(0) * other.coeff(2)).conjugate(); res.col(2) = (_expression().col(0) * other.coeff(1) - _expression().col(1) * other.coeff(0)).conjugate(); } return res; } namespace internal { template struct unitOrthogonal_selector { typedef typename plain_matrix_type::type VectorType; typedef typename traits::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef typename Derived::Index Index; typedef Matrix Vector2; static inline VectorType run(const Derived& src) { VectorType perp = VectorType::Zero(src.size()); Index maxi = 0; Index sndi = 0; src.cwiseAbs().maxCoeff(&maxi); if (maxi==0) sndi = 1; RealScalar invnm = RealScalar(1)/(Vector2() << src.coeff(sndi),src.coeff(maxi)).finished().norm(); perp.coeffRef(maxi) = -numext::conj(src.coeff(sndi)) * invnm; perp.coeffRef(sndi) = numext::conj(src.coeff(maxi)) * invnm; return perp; } }; template struct unitOrthogonal_selector { typedef typename plain_matrix_type::type VectorType; typedef typename traits::Scalar Scalar; typedef typename NumTraits::Real RealScalar; static inline VectorType run(const Derived& src) { VectorType perp; /* Let us compute the crossed product of *this with a vector * that is not too close to being colinear to *this. */ /* unless the x and y coords are both close to zero, we can * simply take ( -y, x, 0 ) and normalize it. */ if((!isMuchSmallerThan(src.x(), src.z())) || (!isMuchSmallerThan(src.y(), src.z()))) { RealScalar invnm = RealScalar(1)/src.template head<2>().norm(); perp.coeffRef(0) = -numext::conj(src.y())*invnm; perp.coeffRef(1) = numext::conj(src.x())*invnm; perp.coeffRef(2) = 0; } /* if both x and y are close to zero, then the vector is close * to the z-axis, so it's far from colinear to the x-axis for instance. * So we take the crossed product with (1,0,0) and normalize it. */ else { RealScalar invnm = RealScalar(1)/src.template tail<2>().norm(); perp.coeffRef(0) = 0; perp.coeffRef(1) = -numext::conj(src.z())*invnm; perp.coeffRef(2) = numext::conj(src.y())*invnm; } return perp; } }; template struct unitOrthogonal_selector { typedef typename plain_matrix_type::type VectorType; static inline VectorType run(const Derived& src) { return VectorType(-numext::conj(src.y()), numext::conj(src.x())).normalized(); } }; } // end namespace internal /** \returns a unit vector which is orthogonal to \c *this * * The size of \c *this must be at least 2. 
If the size is exactly 2, * then the returned vector is a counter clock wise rotation of \c *this, i.e., (-y,x).normalized(). * * \sa cross() */ template typename MatrixBase::PlainObject MatrixBase::unitOrthogonal() const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return internal::unitOrthogonal_selector::run(derived()); } } // end namespace Eigen #endif // EIGEN_ORTHOMETHODS_H RcppEigen/inst/include/Eigen/src/Geometry/Translation.h0000644000176200001440000001604112253717461022620 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_TRANSLATION_H #define EIGEN_TRANSLATION_H namespace Eigen { /** \geometry_module \ingroup Geometry_Module * * \class Translation * * \brief Represents a translation transformation * * \param _Scalar the scalar type, i.e., the type of the coefficients. * \param _Dim the dimension of the space, can be a compile time value or Dynamic * * \note This class is not aimed to be used to store a translation transformation, * but rather to make easier the constructions and updates of Transform objects. * * \sa class Scaling, class Transform */ template class Translation { public: EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Dim) /** dimension of the space */ enum { Dim = _Dim }; /** the scalar type of the coefficients */ typedef _Scalar Scalar; /** corresponding vector type */ typedef Matrix VectorType; /** corresponding linear transformation matrix type */ typedef Matrix LinearMatrixType; /** corresponding affine transformation type */ typedef Transform AffineTransformType; /** corresponding isometric transformation type */ typedef Transform IsometryTransformType; protected: VectorType m_coeffs; public: /** Default constructor without initialization. */ Translation() {} /** */ inline Translation(const Scalar& sx, const Scalar& sy) { eigen_assert(Dim==2); m_coeffs.x() = sx; m_coeffs.y() = sy; } /** */ inline Translation(const Scalar& sx, const Scalar& sy, const Scalar& sz) { eigen_assert(Dim==3); m_coeffs.x() = sx; m_coeffs.y() = sy; m_coeffs.z() = sz; } /** Constructs and initialize the translation transformation from a vector of translation coefficients */ explicit inline Translation(const VectorType& vector) : m_coeffs(vector) {} /** \brief Retruns the x-translation by value. **/ inline Scalar x() const { return m_coeffs.x(); } /** \brief Retruns the y-translation by value. **/ inline Scalar y() const { return m_coeffs.y(); } /** \brief Retruns the z-translation by value. **/ inline Scalar z() const { return m_coeffs.z(); } /** \brief Retruns the x-translation as a reference. **/ inline Scalar& x() { return m_coeffs.x(); } /** \brief Retruns the y-translation as a reference. **/ inline Scalar& y() { return m_coeffs.y(); } /** \brief Retruns the z-translation as a reference. 
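// Editorial usage sketch for cross(), cross3() and unitOrthogonal() from
// OrthoMethods.h above. Guard and function name are hypothetical.
#ifdef RCPPEIGEN_EDITOR_EXAMPLES
#include <Eigen/Geometry>
inline Eigen::Vector3d example_ortho_methods()
{
  Eigen::Vector3d a(1.0, 0.0, 0.0), b(0.0, 1.0, 0.0);
  Eigen::Vector3d c = a.cross(b);                  // (0, 0, 1)
  Eigen::Vector3d n = a.unitOrthogonal();          // some unit vector orthogonal to a
  Eigen::Vector4d a4(1.0, 0.0, 0.0, 0.0), b4(0.0, 1.0, 0.0, 0.0);
  Eigen::Vector4d c4 = a4.cross3(b4);              // cross product of the xyz parts
  return c + n + c4.head<3>();
}
#endif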
**/ inline Scalar& z() { return m_coeffs.z(); } const VectorType& vector() const { return m_coeffs; } VectorType& vector() { return m_coeffs; } const VectorType& translation() const { return m_coeffs; } VectorType& translation() { return m_coeffs; } /** Concatenates two translation */ inline Translation operator* (const Translation& other) const { return Translation(m_coeffs + other.m_coeffs); } /** Concatenates a translation and a uniform scaling */ inline AffineTransformType operator* (const UniformScaling& other) const; /** Concatenates a translation and a linear transformation */ template inline AffineTransformType operator* (const EigenBase& linear) const; /** Concatenates a translation and a rotation */ template inline IsometryTransformType operator*(const RotationBase& r) const { return *this * IsometryTransformType(r); } /** \returns the concatenation of a linear transformation \a l with the translation \a t */ // its a nightmare to define a templated friend function outside its declaration template friend inline AffineTransformType operator*(const EigenBase& linear, const Translation& t) { AffineTransformType res; res.matrix().setZero(); res.linear() = linear.derived(); res.translation() = linear.derived() * t.m_coeffs; res.matrix().row(Dim).setZero(); res(Dim,Dim) = Scalar(1); return res; } /** Concatenates a translation and a transformation */ template inline Transform operator* (const Transform& t) const { Transform res = t; res.pretranslate(m_coeffs); return res; } /** Applies translation to vector */ inline VectorType operator* (const VectorType& other) const { return m_coeffs + other; } /** \returns the inverse translation (opposite) */ Translation inverse() const { return Translation(-m_coeffs); } Translation& operator=(const Translation& other) { m_coeffs = other.m_coeffs; return *this; } static const Translation Identity() { return Translation(VectorType::Zero()); } /** \returns \c *this with scalar type casted to \a NewScalarType * * Note that if \a NewScalarType is equal to the current scalar type of \c *this * then this function smartly returns a const reference to \c *this. */ template inline typename internal::cast_return_type >::type cast() const { return typename internal::cast_return_type >::type(*this); } /** Copy constructor with scalar type conversion */ template inline explicit Translation(const Translation& other) { m_coeffs = other.vector().template cast(); } /** \returns \c true if \c *this is approximately equal to \a other, within the precision * determined by \a prec. 
* * \sa MatrixBase::isApprox() */ bool isApprox(const Translation& other, typename NumTraits::Real prec = NumTraits::dummy_precision()) const { return m_coeffs.isApprox(other.m_coeffs, prec); } }; /** \addtogroup Geometry_Module */ //@{ typedef Translation Translation2f; typedef Translation Translation2d; typedef Translation Translation3f; typedef Translation Translation3d; //@} template inline typename Translation::AffineTransformType Translation::operator* (const UniformScaling& other) const { AffineTransformType res; res.matrix().setZero(); res.linear().diagonal().fill(other.factor()); res.translation() = m_coeffs; res(Dim,Dim) = Scalar(1); return res; } template template inline typename Translation::AffineTransformType Translation::operator* (const EigenBase& linear) const { AffineTransformType res; res.matrix().setZero(); res.linear() = linear.derived(); res.translation() = m_coeffs; res.matrix().row(Dim).setZero(); res(Dim,Dim) = Scalar(1); return res; } } // end namespace Eigen #endif // EIGEN_TRANSLATION_H RcppEigen/inst/include/Eigen/src/Geometry/RotationBase.h0000644000176200001440000001702312253717461022715 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ROTATIONBASE_H #define EIGEN_ROTATIONBASE_H namespace Eigen { // forward declaration namespace internal { template struct rotation_base_generic_product_selector; } /** \class RotationBase * * \brief Common base class for compact rotation representations * * \param Derived is the derived type, i.e., a rotation type * \param _Dim the dimension of the space */ template class RotationBase { public: enum { Dim = _Dim }; /** the scalar type of the coefficients */ typedef typename internal::traits::Scalar Scalar; /** corresponding linear transformation matrix type */ typedef Matrix RotationMatrixType; typedef Matrix VectorType; public: inline const Derived& derived() const { return *static_cast(this); } inline Derived& derived() { return *static_cast(this); } /** \returns an equivalent rotation matrix */ inline RotationMatrixType toRotationMatrix() const { return derived().toRotationMatrix(); } /** \returns an equivalent rotation matrix * This function is added to be conform with the Transform class' naming scheme. 
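// Editorial usage sketch for the Translation class defined above: applying a
// translation to a point and composing it with a rotation into an isometry.
// Guard and function name are hypothetical.
#ifdef RCPPEIGEN_EDITOR_EXAMPLES
#include <Eigen/Geometry>
inline Eigen::Vector3d example_translation()
{
  Eigen::Translation3d t(1.0, 2.0, 3.0);
  Eigen::Vector3d p = t * Eigen::Vector3d::Zero();                 // (1, 2, 3)
  // translation * rotation yields a Transform with Mode == Isometry:
  Eigen::Isometry3d T = t * Eigen::AngleAxisd(0.25, Eigen::Vector3d::UnitZ());
  return T * p;                                                    // rotate, then translate the point
}
#endif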
*/ inline RotationMatrixType matrix() const { return derived().toRotationMatrix(); } /** \returns the inverse rotation */ inline Derived inverse() const { return derived().inverse(); } /** \returns the concatenation of the rotation \c *this with a translation \a t */ inline Transform operator*(const Translation& t) const { return Transform(*this) * t; } /** \returns the concatenation of the rotation \c *this with a uniform scaling \a s */ inline RotationMatrixType operator*(const UniformScaling& s) const { return toRotationMatrix() * s.factor(); } /** \returns the concatenation of the rotation \c *this with a generic expression \a e * \a e can be: * - a DimxDim linear transformation matrix * - a DimxDim diagonal matrix (axis aligned scaling) * - a vector of size Dim */ template EIGEN_STRONG_INLINE typename internal::rotation_base_generic_product_selector::ReturnType operator*(const EigenBase& e) const { return internal::rotation_base_generic_product_selector::run(derived(), e.derived()); } /** \returns the concatenation of a linear transformation \a l with the rotation \a r */ template friend inline RotationMatrixType operator*(const EigenBase& l, const Derived& r) { return l.derived() * r.toRotationMatrix(); } /** \returns the concatenation of a scaling \a l with the rotation \a r */ friend inline Transform operator*(const DiagonalMatrix& l, const Derived& r) { Transform res(r); res.linear().applyOnTheLeft(l); return res; } /** \returns the concatenation of the rotation \c *this with a transformation \a t */ template inline Transform operator*(const Transform& t) const { return toRotationMatrix() * t; } template inline VectorType _transformVector(const OtherVectorType& v) const { return toRotationMatrix() * v; } }; namespace internal { // implementation of the generic product rotation * matrix template struct rotation_base_generic_product_selector { enum { Dim = RotationDerived::Dim }; typedef Matrix ReturnType; static inline ReturnType run(const RotationDerived& r, const MatrixType& m) { return r.toRotationMatrix() * m; } }; template struct rotation_base_generic_product_selector< RotationDerived, DiagonalMatrix, false > { typedef Transform ReturnType; static inline ReturnType run(const RotationDerived& r, const DiagonalMatrix& m) { ReturnType res(r); res.linear() *= m; return res; } }; template struct rotation_base_generic_product_selector { enum { Dim = RotationDerived::Dim }; typedef Matrix ReturnType; static EIGEN_STRONG_INLINE ReturnType run(const RotationDerived& r, const OtherVectorType& v) { return r._transformVector(v); } }; } // end namespace internal /** \geometry_module * * \brief Constructs a Dim x Dim rotation matrix from the rotation \a r */ template template Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols> ::Matrix(const RotationBase& r) { EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Matrix,int(OtherDerived::Dim),int(OtherDerived::Dim)) *this = r.toRotationMatrix(); } /** \geometry_module * * \brief Set a Dim x Dim rotation matrix from the rotation \a r */ template template Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols>& Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols> ::operator=(const RotationBase& r) { EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Matrix,int(OtherDerived::Dim),int(OtherDerived::Dim)) return *this = r.toRotationMatrix(); } namespace internal { /** \internal * * Helper function to return an arbitrary rotation object to a rotation matrix. 
* * \param Scalar the numeric type of the matrix coefficients * \param Dim the dimension of the current space * * It returns a Dim x Dim fixed size matrix. * * Default specializations are provided for: * - any scalar type (2D), * - any matrix expression, * - any type based on RotationBase (e.g., Quaternion, AngleAxis, Rotation2D) * * Currently toRotationMatrix is only used by Transform. * * \sa class Transform, class Rotation2D, class Quaternion, class AngleAxis */ template static inline Matrix toRotationMatrix(const Scalar& s) { EIGEN_STATIC_ASSERT(Dim==2,YOU_MADE_A_PROGRAMMING_MISTAKE) return Rotation2D(s).toRotationMatrix(); } template static inline Matrix toRotationMatrix(const RotationBase& r) { return r.toRotationMatrix(); } template static inline const MatrixBase& toRotationMatrix(const MatrixBase& mat) { EIGEN_STATIC_ASSERT(OtherDerived::RowsAtCompileTime==Dim && OtherDerived::ColsAtCompileTime==Dim, YOU_MADE_A_PROGRAMMING_MISTAKE) return mat; } } // end namespace internal } // end namespace Eigen #endif // EIGEN_ROTATIONBASE_H RcppEigen/inst/include/Eigen/src/Geometry/Transform.h0000644000176200001440000015564412446006401022277 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // Copyright (C) 2009 Benoit Jacob // Copyright (C) 2010 Hauke Heibel // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_TRANSFORM_H #define EIGEN_TRANSFORM_H namespace Eigen { namespace internal { template struct transform_traits { enum { Dim = Transform::Dim, HDim = Transform::HDim, Mode = Transform::Mode, IsProjective = (int(Mode)==int(Projective)) }; }; template< typename TransformType, typename MatrixType, int Case = transform_traits::IsProjective ? 0 : int(MatrixType::RowsAtCompileTime) == int(transform_traits::HDim) ? 1 : 2> struct transform_right_product_impl; template< typename Other, int Mode, int Options, int Dim, int HDim, int OtherRows=Other::RowsAtCompileTime, int OtherCols=Other::ColsAtCompileTime> struct transform_left_product_impl; template< typename Lhs, typename Rhs, bool AnyProjective = transform_traits::IsProjective || transform_traits::IsProjective> struct transform_transform_product_impl; template< typename Other, int Mode, int Options, int Dim, int HDim, int OtherRows=Other::RowsAtCompileTime, int OtherCols=Other::ColsAtCompileTime> struct transform_construct_from_matrix; template struct transform_take_affine_part; template struct transform_make_affine; } // end namespace internal /** \geometry_module \ingroup Geometry_Module * * \class Transform * * \brief Represents an homogeneous transformation in a N dimensional space * * \tparam _Scalar the scalar type, i.e., the type of the coefficients * \tparam _Dim the dimension of the space * \tparam _Mode the type of the transformation. Can be: * - #Affine: the transformation is stored as a (Dim+1)^2 matrix, * where the last row is assumed to be [0 ... 0 1]. * - #AffineCompact: the transformation is stored as a (Dim)x(Dim+1) matrix. * - #Projective: the transformation is stored as a (Dim+1)^2 matrix * without any assumption. * \tparam _Options has the same meaning as in class Matrix. It allows to specify DontAlign and/or RowMajor. * These Options are passed directly to the underlying matrix type. 
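// Editorial usage sketch for RotationBase and the Matrix constructor and
// assignment from a rotation defined above: any rotation representation can
// be turned into a plain Dim x Dim matrix. Guard and function name are
// hypothetical.
#ifdef RCPPEIGEN_EDITOR_EXAMPLES
#include <Eigen/Geometry>
inline Eigen::Vector3d example_rotation_base()
{
  Eigen::AngleAxisd aa(0.3, Eigen::Vector3d::UnitY());
  Eigen::Matrix3d R1(aa);                          // Matrix constructor from a RotationBase
  Eigen::Matrix3d R2;
  R2 = aa;                                         // Matrix assignment from a RotationBase
  return (R1 - R2) * Eigen::Vector3d::UnitX()      // both give the same rotation matrix
       + aa * Eigen::Vector3d::UnitZ();            // RotationBase applied directly to a vector
}
#endif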
* * The homography is internally represented and stored by a matrix which * is available through the matrix() method. To understand the behavior of * this class you have to think a Transform object as its internal * matrix representation. The chosen convention is right multiply: * * \code v' = T * v \endcode * * Therefore, an affine transformation matrix M is shaped like this: * * \f$ \left( \begin{array}{cc} * linear & translation\\ * 0 ... 0 & 1 * \end{array} \right) \f$ * * Note that for a projective transformation the last row can be anything, * and then the interpretation of different parts might be sightly different. * * However, unlike a plain matrix, the Transform class provides many features * simplifying both its assembly and usage. In particular, it can be composed * with any other transformations (Transform,Translation,RotationBase,Matrix) * and can be directly used to transform implicit homogeneous vectors. All these * operations are handled via the operator*. For the composition of transformations, * its principle consists to first convert the right/left hand sides of the product * to a compatible (Dim+1)^2 matrix and then perform a pure matrix product. * Of course, internally, operator* tries to perform the minimal number of operations * according to the nature of each terms. Likewise, when applying the transform * to non homogeneous vectors, the latters are automatically promoted to homogeneous * one before doing the matrix product. The convertions to homogeneous representations * are performed as follow: * * \b Translation t (Dim)x(1): * \f$ \left( \begin{array}{cc} * I & t \\ * 0\,...\,0 & 1 * \end{array} \right) \f$ * * \b Rotation R (Dim)x(Dim): * \f$ \left( \begin{array}{cc} * R & 0\\ * 0\,...\,0 & 1 * \end{array} \right) \f$ * * \b Linear \b Matrix L (Dim)x(Dim): * \f$ \left( \begin{array}{cc} * L & 0\\ * 0\,...\,0 & 1 * \end{array} \right) \f$ * * \b Affine \b Matrix A (Dim)x(Dim+1): * \f$ \left( \begin{array}{c} * A\\ * 0\,...\,0\,1 * \end{array} \right) \f$ * * \b Column \b vector v (Dim)x(1): * \f$ \left( \begin{array}{c} * v\\ * 1 * \end{array} \right) \f$ * * \b Set \b of \b column \b vectors V1...Vn (Dim)x(n): * \f$ \left( \begin{array}{ccc} * v_1 & ... & v_n\\ * 1 & ... & 1 * \end{array} \right) \f$ * * The concatenation of a Transform object with any kind of other transformation * always returns a Transform object. * * A little exception to the "as pure matrix product" rule is the case of the * transformation of non homogeneous vectors by an affine transformation. In * that case the last matrix row can be ignored, and the product returns non * homogeneous vectors. * * Since, for instance, a Dim x Dim matrix is interpreted as a linear transformation, * it is not possible to directly transform Dim vectors stored in a Dim x Dim matrix. * The solution is either to use a Dim x Dynamic matrix or explicitly request a * vector transformation by making the vector homogeneous: * \code * m' = T * m.colwise().homogeneous(); * \endcode * Note that there is zero overhead. * * Conversion methods from/to Qt's QMatrix and QTransform are available if the * preprocessor token EIGEN_QT_SUPPORT is defined. * * This class can be extended with the help of the plugin mechanism described on the page * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_TRANSFORM_PLUGIN. * * \sa class Matrix, class Quaternion */ template class Transform { public: EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Dim==Dynamic ? 
Dynamic : (_Dim+1)*(_Dim+1)) enum { Mode = _Mode, Options = _Options, Dim = _Dim, ///< space dimension in which the transformation holds HDim = _Dim+1, ///< size of a respective homogeneous vector Rows = int(Mode)==(AffineCompact) ? Dim : HDim }; /** the scalar type of the coefficients */ typedef _Scalar Scalar; typedef DenseIndex Index; /** type of the matrix used to represent the transformation */ typedef typename internal::make_proper_matrix_type::type MatrixType; /** constified MatrixType */ typedef const MatrixType ConstMatrixType; /** type of the matrix used to represent the linear part of the transformation */ typedef Matrix LinearMatrixType; /** type of read/write reference to the linear part of the transformation */ typedef Block LinearPart; /** type of read reference to the linear part of the transformation */ typedef const Block ConstLinearPart; /** type of read/write reference to the affine part of the transformation */ typedef typename internal::conditional >::type AffinePart; /** type of read reference to the affine part of the transformation */ typedef typename internal::conditional >::type ConstAffinePart; /** type of a vector */ typedef Matrix VectorType; /** type of a read/write reference to the translation part of the rotation */ typedef Block TranslationPart; /** type of a read reference to the translation part of the rotation */ typedef const Block ConstTranslationPart; /** corresponding translation type */ typedef Translation TranslationType; // this intermediate enum is needed to avoid an ICE with gcc 3.4 and 4.0 enum { TransformTimeDiagonalMode = ((Mode==int(Isometry))?Affine:int(Mode)) }; /** The return type of the product between a diagonal matrix and a transform */ typedef Transform TransformTimeDiagonalReturnType; protected: MatrixType m_matrix; public: /** Default constructor without initialization of the meaningful coefficients. * If Mode==Affine, then the last row is set to [0 ... 0 1] */ inline Transform() { check_template_params(); internal::transform_make_affine<(int(Mode)==Affine) ? Affine : AffineCompact>::run(m_matrix); } inline Transform(const Transform& other) { check_template_params(); m_matrix = other.m_matrix; } inline explicit Transform(const TranslationType& t) { check_template_params(); *this = t; } inline explicit Transform(const UniformScaling& s) { check_template_params(); *this = s; } template inline explicit Transform(const RotationBase& r) { check_template_params(); *this = r; } inline Transform& operator=(const Transform& other) { m_matrix = other.m_matrix; return *this; } typedef internal::transform_take_affine_part take_affine_part; /** Constructs and initializes a transformation from a Dim^2 or a (Dim+1)^2 matrix. */ template inline explicit Transform(const EigenBase& other) { EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY); check_template_params(); internal::transform_construct_from_matrix::run(this, other.derived()); } /** Set \c *this from a Dim^2 or (Dim+1)^2 matrix. 
*/ template inline Transform& operator=(const EigenBase& other) { EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY); internal::transform_construct_from_matrix::run(this, other.derived()); return *this; } template inline Transform(const Transform& other) { check_template_params(); // only the options change, we can directly copy the matrices m_matrix = other.matrix(); } template inline Transform(const Transform& other) { check_template_params(); // prevent conversions as: // Affine | AffineCompact | Isometry = Projective EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(OtherMode==int(Projective), Mode==int(Projective)), YOU_PERFORMED_AN_INVALID_TRANSFORMATION_CONVERSION) // prevent conversions as: // Isometry = Affine | AffineCompact EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(OtherMode==int(Affine)||OtherMode==int(AffineCompact), Mode!=int(Isometry)), YOU_PERFORMED_AN_INVALID_TRANSFORMATION_CONVERSION) enum { ModeIsAffineCompact = Mode == int(AffineCompact), OtherModeIsAffineCompact = OtherMode == int(AffineCompact) }; if(ModeIsAffineCompact == OtherModeIsAffineCompact) { // We need the block expression because the code is compiled for all // combinations of transformations and will trigger a compile time error // if one tries to assign the matrices directly m_matrix.template block(0,0) = other.matrix().template block(0,0); makeAffine(); } else if(OtherModeIsAffineCompact) { typedef typename Transform::MatrixType OtherMatrixType; internal::transform_construct_from_matrix::run(this, other.matrix()); } else { // here we know that Mode == AffineCompact and OtherMode != AffineCompact. // if OtherMode were Projective, the static assert above would already have caught it. 
// So the only possibility is that OtherMode == Affine linear() = other.linear(); translation() = other.translation(); } } template Transform(const ReturnByValue& other) { check_template_params(); other.evalTo(*this); } template Transform& operator=(const ReturnByValue& other) { other.evalTo(*this); return *this; } #ifdef EIGEN_QT_SUPPORT inline Transform(const QMatrix& other); inline Transform& operator=(const QMatrix& other); inline QMatrix toQMatrix(void) const; inline Transform(const QTransform& other); inline Transform& operator=(const QTransform& other); inline QTransform toQTransform(void) const; #endif /** shortcut for m_matrix(row,col); * \sa MatrixBase::operator(Index,Index) const */ inline Scalar operator() (Index row, Index col) const { return m_matrix(row,col); } /** shortcut for m_matrix(row,col); * \sa MatrixBase::operator(Index,Index) */ inline Scalar& operator() (Index row, Index col) { return m_matrix(row,col); } /** \returns a read-only expression of the transformation matrix */ inline const MatrixType& matrix() const { return m_matrix; } /** \returns a writable expression of the transformation matrix */ inline MatrixType& matrix() { return m_matrix; } /** \returns a read-only expression of the linear part of the transformation */ inline ConstLinearPart linear() const { return ConstLinearPart(m_matrix,0,0); } /** \returns a writable expression of the linear part of the transformation */ inline LinearPart linear() { return LinearPart(m_matrix,0,0); } /** \returns a read-only expression of the Dim x HDim affine part of the transformation */ inline ConstAffinePart affine() const { return take_affine_part::run(m_matrix); } /** \returns a writable expression of the Dim x HDim affine part of the transformation */ inline AffinePart affine() { return take_affine_part::run(m_matrix); } /** \returns a read-only expression of the translation vector of the transformation */ inline ConstTranslationPart translation() const { return ConstTranslationPart(m_matrix,0,Dim); } /** \returns a writable expression of the translation vector of the transformation */ inline TranslationPart translation() { return TranslationPart(m_matrix,0,Dim); } /** \returns an expression of the product between the transform \c *this and a matrix expression \a other * * The right hand side \a other might be either: * \li a vector of size Dim, * \li an homogeneous vector of size Dim+1, * \li a set of vectors of size Dim x Dynamic, * \li a set of homogeneous vectors of size Dim+1 x Dynamic, * \li a linear transformation matrix of size Dim x Dim, * \li an affine transformation matrix of size Dim x Dim+1, * \li a transformation matrix of size Dim+1 x Dim+1. */ // note: this function is defined here because some compilers cannot find the respective declaration template EIGEN_STRONG_INLINE const typename internal::transform_right_product_impl::ResultType operator * (const EigenBase &other) const { return internal::transform_right_product_impl::run(*this,other.derived()); } /** \returns the product expression of a transformation matrix \a a times a transform \a b * * The left hand side \a other might be either: * \li a linear transformation matrix of size Dim x Dim, * \li an affine transformation matrix of size Dim x Dim+1, * \li a general transformation matrix of size Dim+1 x Dim+1. 
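 *
 * For instance (an illustrative sketch with hypothetical names, assuming
 * \c <Eigen/Geometry> is included):
 * \code
 * Eigen::Affine2d T = Eigen::Affine2d::Identity();
 * T.translate(Eigen::Vector2d(1.0, 0.0));
 * Eigen::Matrix2d R = Eigen::Rotation2Dd(0.3).toRotationMatrix();
 * Eigen::Affine2d RT = R * T;   // a Dim x Dim lhs is treated as a linear transformation;
 *                               // here the result is again an Affine2d
 * \endcode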
*/ template friend inline const typename internal::transform_left_product_impl::ResultType operator * (const EigenBase &a, const Transform &b) { return internal::transform_left_product_impl::run(a.derived(),b); } /** \returns The product expression of a transform \a a times a diagonal matrix \a b * * The rhs diagonal matrix is interpreted as an affine scaling transformation. The * product results in a Transform of the same type (mode) as the lhs only if the lhs * mode is no isometry. In that case, the returned transform is an affinity. */ template inline const TransformTimeDiagonalReturnType operator * (const DiagonalBase &b) const { TransformTimeDiagonalReturnType res(*this); res.linear() *= b; return res; } /** \returns The product expression of a diagonal matrix \a a times a transform \a b * * The lhs diagonal matrix is interpreted as an affine scaling transformation. The * product results in a Transform of the same type (mode) as the lhs only if the lhs * mode is no isometry. In that case, the returned transform is an affinity. */ template friend inline TransformTimeDiagonalReturnType operator * (const DiagonalBase &a, const Transform &b) { TransformTimeDiagonalReturnType res; res.linear().noalias() = a*b.linear(); res.translation().noalias() = a*b.translation(); if (Mode!=int(AffineCompact)) res.matrix().row(Dim) = b.matrix().row(Dim); return res; } template inline Transform& operator*=(const EigenBase& other) { return *this = *this * other; } /** Concatenates two transformations */ inline const Transform operator * (const Transform& other) const { return internal::transform_transform_product_impl::run(*this,other); } #ifdef __INTEL_COMPILER private: // this intermediate structure permits to workaround a bug in ICC 11: // error: template instantiation resulted in unexpected function type of "Eigen::Transform // (const Eigen::Transform &) const" // (the meaning of a name may have changed since the template declaration -- the type of the template is: // "Eigen::internal::transform_transform_product_impl, // Eigen::Transform, >::ResultType (const Eigen::Transform &) const") // template struct icc_11_workaround { typedef internal::transform_transform_product_impl > ProductType; typedef typename ProductType::ResultType ResultType; }; public: /** Concatenates two different transformations */ template inline typename icc_11_workaround::ResultType operator * (const Transform& other) const { typedef typename icc_11_workaround::ProductType ProductType; return ProductType::run(*this,other); } #else /** Concatenates two different transformations */ template inline typename internal::transform_transform_product_impl >::ResultType operator * (const Transform& other) const { return internal::transform_transform_product_impl >::run(*this,other); } #endif /** \sa MatrixBase::setIdentity() */ void setIdentity() { m_matrix.setIdentity(); } /** * \brief Returns an identity transformation. * \todo In the future this function should be returning a Transform expression. 
*/ static const Transform Identity() { return Transform(MatrixType::Identity()); } template inline Transform& scale(const MatrixBase &other); template inline Transform& prescale(const MatrixBase &other); inline Transform& scale(const Scalar& s); inline Transform& prescale(const Scalar& s); template inline Transform& translate(const MatrixBase &other); template inline Transform& pretranslate(const MatrixBase &other); template inline Transform& rotate(const RotationType& rotation); template inline Transform& prerotate(const RotationType& rotation); Transform& shear(const Scalar& sx, const Scalar& sy); Transform& preshear(const Scalar& sx, const Scalar& sy); inline Transform& operator=(const TranslationType& t); inline Transform& operator*=(const TranslationType& t) { return translate(t.vector()); } inline Transform operator*(const TranslationType& t) const; inline Transform& operator=(const UniformScaling& t); inline Transform& operator*=(const UniformScaling& s) { return scale(s.factor()); } inline Transform operator*(const UniformScaling& s) const { Transform res = *this; res.scale(s.factor()); return res; } inline Transform& operator*=(const DiagonalMatrix& s) { linear() *= s; return *this; } template inline Transform& operator=(const RotationBase& r); template inline Transform& operator*=(const RotationBase& r) { return rotate(r.toRotationMatrix()); } template inline Transform operator*(const RotationBase& r) const; const LinearMatrixType rotation() const; template void computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const; template void computeScalingRotation(ScalingMatrixType *scaling, RotationMatrixType *rotation) const; template Transform& fromPositionOrientationScale(const MatrixBase &position, const OrientationType& orientation, const MatrixBase &scale); inline Transform inverse(TransformTraits traits = (TransformTraits)Mode) const; /** \returns a const pointer to the column major internal matrix */ const Scalar* data() const { return m_matrix.data(); } /** \returns a non-const pointer to the column major internal matrix */ Scalar* data() { return m_matrix.data(); } /** \returns \c *this with scalar type casted to \a NewScalarType * * Note that if \a NewScalarType is equal to the current scalar type of \c *this * then this function smartly returns a const reference to \c *this. */ template inline typename internal::cast_return_type >::type cast() const { return typename internal::cast_return_type >::type(*this); } /** Copy constructor with scalar type conversion */ template inline explicit Transform(const Transform& other) { check_template_params(); m_matrix = other.matrix().template cast(); } /** \returns \c true if \c *this is approximately equal to \a other, within the precision * determined by \a prec. * * \sa MatrixBase::isApprox() */ bool isApprox(const Transform& other, const typename NumTraits::Real& prec = NumTraits::dummy_precision()) const { return m_matrix.isApprox(other.m_matrix, prec); } /** Sets the last row to [0 ... 0 1] */ void makeAffine() { internal::transform_make_affine::run(m_matrix); } /** \internal * \returns the Dim x Dim linear part if the transformation is affine, * and the HDim x Dim part for projective transformations. */ inline Block linearExt() { return m_matrix.template block(0,0); } /** \internal * \returns the Dim x Dim linear part if the transformation is affine, * and the HDim x Dim part for projective transformations. 
*/ inline const Block linearExt() const { return m_matrix.template block(0,0); } /** \internal * \returns the translation part if the transformation is affine, * and the last column for projective transformations. */ inline Block translationExt() { return m_matrix.template block(0,Dim); } /** \internal * \returns the translation part if the transformation is affine, * and the last column for projective transformations. */ inline const Block translationExt() const { return m_matrix.template block(0,Dim); } #ifdef EIGEN_TRANSFORM_PLUGIN #include EIGEN_TRANSFORM_PLUGIN #endif protected: #ifndef EIGEN_PARSED_BY_DOXYGEN static EIGEN_STRONG_INLINE void check_template_params() { EIGEN_STATIC_ASSERT((Options & (DontAlign|RowMajor)) == Options, INVALID_MATRIX_TEMPLATE_PARAMETERS) } #endif }; /** \ingroup Geometry_Module */ typedef Transform Isometry2f; /** \ingroup Geometry_Module */ typedef Transform Isometry3f; /** \ingroup Geometry_Module */ typedef Transform Isometry2d; /** \ingroup Geometry_Module */ typedef Transform Isometry3d; /** \ingroup Geometry_Module */ typedef Transform Affine2f; /** \ingroup Geometry_Module */ typedef Transform Affine3f; /** \ingroup Geometry_Module */ typedef Transform Affine2d; /** \ingroup Geometry_Module */ typedef Transform Affine3d; /** \ingroup Geometry_Module */ typedef Transform AffineCompact2f; /** \ingroup Geometry_Module */ typedef Transform AffineCompact3f; /** \ingroup Geometry_Module */ typedef Transform AffineCompact2d; /** \ingroup Geometry_Module */ typedef Transform AffineCompact3d; /** \ingroup Geometry_Module */ typedef Transform Projective2f; /** \ingroup Geometry_Module */ typedef Transform Projective3f; /** \ingroup Geometry_Module */ typedef Transform Projective2d; /** \ingroup Geometry_Module */ typedef Transform Projective3d; /************************** *** Optional QT support *** **************************/ #ifdef EIGEN_QT_SUPPORT /** Initializes \c *this from a QMatrix assuming the dimension is 2. * * This function is available only if the token EIGEN_QT_SUPPORT is defined. */ template Transform::Transform(const QMatrix& other) { check_template_params(); *this = other; } /** Set \c *this from a QMatrix assuming the dimension is 2. * * This function is available only if the token EIGEN_QT_SUPPORT is defined. */ template Transform& Transform::operator=(const QMatrix& other) { EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE) m_matrix << other.m11(), other.m21(), other.dx(), other.m12(), other.m22(), other.dy(), 0, 0, 1; return *this; } /** \returns a QMatrix from \c *this assuming the dimension is 2. * * \warning this conversion might loss data if \c *this is not affine * * This function is available only if the token EIGEN_QT_SUPPORT is defined. */ template QMatrix Transform::toQMatrix(void) const { check_template_params(); EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE) return QMatrix(m_matrix.coeff(0,0), m_matrix.coeff(1,0), m_matrix.coeff(0,1), m_matrix.coeff(1,1), m_matrix.coeff(0,2), m_matrix.coeff(1,2)); } /** Initializes \c *this from a QTransform assuming the dimension is 2. * * This function is available only if the token EIGEN_QT_SUPPORT is defined. */ template Transform::Transform(const QTransform& other) { check_template_params(); *this = other; } /** Set \c *this from a QTransform assuming the dimension is 2. * * This function is available only if the token EIGEN_QT_SUPPORT is defined. 
*/ template Transform& Transform::operator=(const QTransform& other) { check_template_params(); EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE) if (Mode == int(AffineCompact)) m_matrix << other.m11(), other.m21(), other.dx(), other.m12(), other.m22(), other.dy(); else m_matrix << other.m11(), other.m21(), other.dx(), other.m12(), other.m22(), other.dy(), other.m13(), other.m23(), other.m33(); return *this; } /** \returns a QTransform from \c *this assuming the dimension is 2. * * This function is available only if the token EIGEN_QT_SUPPORT is defined. */ template QTransform Transform::toQTransform(void) const { EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE) if (Mode == int(AffineCompact)) return QTransform(m_matrix.coeff(0,0), m_matrix.coeff(1,0), m_matrix.coeff(0,1), m_matrix.coeff(1,1), m_matrix.coeff(0,2), m_matrix.coeff(1,2)); else return QTransform(m_matrix.coeff(0,0), m_matrix.coeff(1,0), m_matrix.coeff(2,0), m_matrix.coeff(0,1), m_matrix.coeff(1,1), m_matrix.coeff(2,1), m_matrix.coeff(0,2), m_matrix.coeff(1,2), m_matrix.coeff(2,2)); } #endif /********************* *** Procedural API *** *********************/ /** Applies on the right the non uniform scale transformation represented * by the vector \a other to \c *this and returns a reference to \c *this. * \sa prescale() */ template template Transform& Transform::scale(const MatrixBase &other) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim)) EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS) linearExt().noalias() = (linearExt() * other.asDiagonal()); return *this; } /** Applies on the right a uniform scale of a factor \a c to \c *this * and returns a reference to \c *this. * \sa prescale(Scalar) */ template inline Transform& Transform::scale(const Scalar& s) { EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS) linearExt() *= s; return *this; } /** Applies on the left the non uniform scale transformation represented * by the vector \a other to \c *this and returns a reference to \c *this. * \sa scale() */ template template Transform& Transform::prescale(const MatrixBase &other) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim)) EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS) m_matrix.template block(0,0).noalias() = (other.asDiagonal() * m_matrix.template block(0,0)); return *this; } /** Applies on the left a uniform scale of a factor \a c to \c *this * and returns a reference to \c *this. * \sa scale(Scalar) */ template inline Transform& Transform::prescale(const Scalar& s) { EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS) m_matrix.template topRows() *= s; return *this; } /** Applies on the right the translation matrix represented by the vector \a other * to \c *this and returns a reference to \c *this. * \sa pretranslate() */ template template Transform& Transform::translate(const MatrixBase &other) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim)) translationExt() += linearExt() * other; return *this; } /** Applies on the left the translation matrix represented by the vector \a other * to \c *this and returns a reference to \c *this. 
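 *
 * For example (an illustrative sketch with hypothetical names, not from the
 * upstream documentation):
 * \code
 * Eigen::Affine3d T = Eigen::Affine3d::Identity();
 * T.rotate(Eigen::AngleAxisd(0.5, Eigen::Vector3d::UnitY()));
 * T.translate(Eigen::Vector3d(1.0, 0.0, 0.0));     // post-multiplies: offset expressed in the frame of T
 * T.pretranslate(Eigen::Vector3d(0.0, 2.0, 0.0));  // pre-multiplies: offset expressed in the global frame
 * \endcode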
* \sa translate() */ template template Transform& Transform::pretranslate(const MatrixBase &other) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim)) if(int(Mode)==int(Projective)) affine() += other * m_matrix.row(Dim); else translation() += other; return *this; } /** Applies on the right the rotation represented by the rotation \a rotation * to \c *this and returns a reference to \c *this. * * The template parameter \a RotationType is the type of the rotation which * must be known by internal::toRotationMatrix<>. * * Natively supported types includes: * - any scalar (2D), * - a Dim x Dim matrix expression, * - a Quaternion (3D), * - a AngleAxis (3D) * * This mechanism is easily extendable to support user types such as Euler angles, * or a pair of Quaternion for 4D rotations. * * \sa rotate(Scalar), class Quaternion, class AngleAxis, prerotate(RotationType) */ template template Transform& Transform::rotate(const RotationType& rotation) { linearExt() *= internal::toRotationMatrix(rotation); return *this; } /** Applies on the left the rotation represented by the rotation \a rotation * to \c *this and returns a reference to \c *this. * * See rotate() for further details. * * \sa rotate() */ template template Transform& Transform::prerotate(const RotationType& rotation) { m_matrix.template block(0,0) = internal::toRotationMatrix(rotation) * m_matrix.template block(0,0); return *this; } /** Applies on the right the shear transformation represented * by the vector \a other to \c *this and returns a reference to \c *this. * \warning 2D only. * \sa preshear() */ template Transform& Transform::shear(const Scalar& sx, const Scalar& sy) { EIGEN_STATIC_ASSERT(int(Dim)==2, YOU_MADE_A_PROGRAMMING_MISTAKE) EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS) VectorType tmp = linear().col(0)*sy + linear().col(1); linear() << linear().col(0) + linear().col(1)*sx, tmp; return *this; } /** Applies on the left the shear transformation represented * by the vector \a other to \c *this and returns a reference to \c *this. * \warning 2D only. 
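 *
 * A small 2D sketch (illustrative only, with hypothetical values):
 * \code
 * Eigen::Affine2d S = Eigen::Affine2d::Identity();
 * S.shear(0.5, 0.0);   // starting from the identity this maps (x, y) to (x + 0.5*y, y)
 * \endcode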
* \sa shear() */ template Transform& Transform::preshear(const Scalar& sx, const Scalar& sy) { EIGEN_STATIC_ASSERT(int(Dim)==2, YOU_MADE_A_PROGRAMMING_MISTAKE) EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS) m_matrix.template block(0,0) = LinearMatrixType(1, sx, sy, 1) * m_matrix.template block(0,0); return *this; } /****************************************************** *** Scaling, Translation and Rotation compatibility *** ******************************************************/ template inline Transform& Transform::operator=(const TranslationType& t) { linear().setIdentity(); translation() = t.vector(); makeAffine(); return *this; } template inline Transform Transform::operator*(const TranslationType& t) const { Transform res = *this; res.translate(t.vector()); return res; } template inline Transform& Transform::operator=(const UniformScaling& s) { m_matrix.setZero(); linear().diagonal().fill(s.factor()); makeAffine(); return *this; } template template inline Transform& Transform::operator=(const RotationBase& r) { linear() = internal::toRotationMatrix(r); translation().setZero(); makeAffine(); return *this; } template template inline Transform Transform::operator*(const RotationBase& r) const { Transform res = *this; res.rotate(r.derived()); return res; } /************************ *** Special functions *** ************************/ /** \returns the rotation part of the transformation * * * \svd_module * * \sa computeRotationScaling(), computeScalingRotation(), class SVD */ template const typename Transform::LinearMatrixType Transform::rotation() const { LinearMatrixType result; computeRotationScaling(&result, (LinearMatrixType*)0); return result; } /** decomposes the linear part of the transformation as a product rotation x scaling, the scaling being * not necessarily positive. * * If either pointer is zero, the corresponding computation is skipped. * * * * \svd_module * * \sa computeScalingRotation(), rotation(), class SVD */ template template void Transform::computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const { JacobiSVD svd(linear(), ComputeFullU | ComputeFullV); Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant(); // so x has absolute value 1 VectorType sv(svd.singularValues()); sv.coeffRef(0) *= x; if(scaling) scaling->lazyAssign(svd.matrixV() * sv.asDiagonal() * svd.matrixV().adjoint()); if(rotation) { LinearMatrixType m(svd.matrixU()); m.col(0) /= x; rotation->lazyAssign(m * svd.matrixV().adjoint()); } } /** decomposes the linear part of the transformation as a product rotation x scaling, the scaling being * not necessarily positive. * * If either pointer is zero, the corresponding computation is skipped. * * * * \svd_module * * \sa computeRotationScaling(), rotation(), class SVD */ template template void Transform::computeScalingRotation(ScalingMatrixType *scaling, RotationMatrixType *rotation) const { JacobiSVD svd(linear(), ComputeFullU | ComputeFullV); Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant(); // so x has absolute value 1 VectorType sv(svd.singularValues()); sv.coeffRef(0) *= x; if(scaling) scaling->lazyAssign(svd.matrixU() * sv.asDiagonal() * svd.matrixU().adjoint()); if(rotation) { LinearMatrixType m(svd.matrixU()); m.col(0) /= x; rotation->lazyAssign(m * svd.matrixV().adjoint()); } } /** Convenient method to set \c *this from a position, orientation and scale * of a 3D object. 
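 *
 * For example (an illustrative sketch; the values and names are hypothetical):
 * \code
 * Eigen::Affine3d T;
 * T.fromPositionOrientationScale(
 *     Eigen::Vector3d(1.0, 2.0, 3.0),                                        // position
 *     Eigen::Quaterniond(Eigen::AngleAxisd(0.5, Eigen::Vector3d::UnitZ())),  // orientation
 *     Eigen::Vector3d(2.0, 2.0, 2.0));                                       // per-axis scale
 * \endcode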
*/ template template Transform& Transform::fromPositionOrientationScale(const MatrixBase &position, const OrientationType& orientation, const MatrixBase &scale) { linear() = internal::toRotationMatrix(orientation); linear() *= scale.asDiagonal(); translation() = position; makeAffine(); return *this; } namespace internal { template struct transform_make_affine { template static void run(MatrixType &mat) { static const int Dim = MatrixType::ColsAtCompileTime-1; mat.template block<1,Dim>(Dim,0).setZero(); mat.coeffRef(Dim,Dim) = typename MatrixType::Scalar(1); } }; template<> struct transform_make_affine { template static void run(MatrixType &) { } }; // selector needed to avoid taking the inverse of a 3x4 matrix template struct projective_transform_inverse { static inline void run(const TransformType&, TransformType&) {} }; template struct projective_transform_inverse { static inline void run(const TransformType& m, TransformType& res) { res.matrix() = m.matrix().inverse(); } }; } // end namespace internal /** * * \returns the inverse transformation according to some given knowledge * on \c *this. * * \param hint allows to optimize the inversion process when the transformation * is known to be not a general transformation (optional). The possible values are: * - #Projective if the transformation is not necessarily affine, i.e., if the * last row is not guaranteed to be [0 ... 0 1] * - #Affine if the last row can be assumed to be [0 ... 0 1] * - #Isometry if the transformation is only a concatenations of translations * and rotations. * The default is the template class parameter \c Mode. * * \warning unless \a traits is always set to NoShear or NoScaling, this function * requires the generic inverse method of MatrixBase defined in the LU module. If * you forget to include this module, then you will get hard to debug linking errors. 
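 *
 * For example (an illustrative sketch; note that the hint must match what the
 * transformation actually is, otherwise the result is meaningless):
 * \code
 * Eigen::Affine3d T = Eigen::Affine3d::Identity();
 * T.rotate(Eigen::AngleAxisd(0.3, Eigen::Vector3d::UnitX()));
 * T.translate(Eigen::Vector3d(0.0, 1.0, 0.0));
 * Eigen::Affine3d Ti = T.inverse(Eigen::Isometry);  // valid here: rotation + translation only,
 *                                                   // so the linear part is inverted by transposition
 * \endcode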
* * \sa MatrixBase::inverse() */ template Transform Transform::inverse(TransformTraits hint) const { Transform res; if (hint == Projective) { internal::projective_transform_inverse::run(*this, res); } else { if (hint == Isometry) { res.matrix().template topLeftCorner() = linear().transpose(); } else if(hint&Affine) { res.matrix().template topLeftCorner() = linear().inverse(); } else { eigen_assert(false && "Invalid transform traits in Transform::Inverse"); } // translation and remaining parts res.matrix().template topRightCorner() = - res.matrix().template topLeftCorner() * translation(); res.makeAffine(); // we do need this, because in the beginning res is uninitialized } return res; } namespace internal { /***************************************************** *** Specializations of take affine part *** *****************************************************/ template struct transform_take_affine_part { typedef typename TransformType::MatrixType MatrixType; typedef typename TransformType::AffinePart AffinePart; typedef typename TransformType::ConstAffinePart ConstAffinePart; static inline AffinePart run(MatrixType& m) { return m.template block(0,0); } static inline ConstAffinePart run(const MatrixType& m) { return m.template block(0,0); } }; template struct transform_take_affine_part > { typedef typename Transform::MatrixType MatrixType; static inline MatrixType& run(MatrixType& m) { return m; } static inline const MatrixType& run(const MatrixType& m) { return m; } }; /***************************************************** *** Specializations of construct from matrix *** *****************************************************/ template struct transform_construct_from_matrix { static inline void run(Transform *transform, const Other& other) { transform->linear() = other; transform->translation().setZero(); transform->makeAffine(); } }; template struct transform_construct_from_matrix { static inline void run(Transform *transform, const Other& other) { transform->affine() = other; transform->makeAffine(); } }; template struct transform_construct_from_matrix { static inline void run(Transform *transform, const Other& other) { transform->matrix() = other; } }; template struct transform_construct_from_matrix { static inline void run(Transform *transform, const Other& other) { transform->matrix() = other.template block(0,0); } }; /********************************************************** *** Specializations of operator* with rhs EigenBase *** **********************************************************/ template struct transform_product_result { enum { Mode = (LhsMode == (int)Projective || RhsMode == (int)Projective ) ? Projective : (LhsMode == (int)Affine || RhsMode == (int)Affine ) ? Affine : (LhsMode == (int)AffineCompact || RhsMode == (int)AffineCompact ) ? AffineCompact : (LhsMode == (int)Isometry || RhsMode == (int)Isometry ) ? 
Isometry : Projective }; }; template< typename TransformType, typename MatrixType > struct transform_right_product_impl< TransformType, MatrixType, 0 > { typedef typename MatrixType::PlainObject ResultType; static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other) { return T.matrix() * other; } }; template< typename TransformType, typename MatrixType > struct transform_right_product_impl< TransformType, MatrixType, 1 > { enum { Dim = TransformType::Dim, HDim = TransformType::HDim, OtherRows = MatrixType::RowsAtCompileTime, OtherCols = MatrixType::ColsAtCompileTime }; typedef typename MatrixType::PlainObject ResultType; static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other) { EIGEN_STATIC_ASSERT(OtherRows==HDim, YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES); typedef Block TopLeftLhs; ResultType res(other.rows(),other.cols()); TopLeftLhs(res, 0, 0, Dim, other.cols()).noalias() = T.affine() * other; res.row(OtherRows-1) = other.row(OtherRows-1); return res; } }; template< typename TransformType, typename MatrixType > struct transform_right_product_impl< TransformType, MatrixType, 2 > { enum { Dim = TransformType::Dim, HDim = TransformType::HDim, OtherRows = MatrixType::RowsAtCompileTime, OtherCols = MatrixType::ColsAtCompileTime }; typedef typename MatrixType::PlainObject ResultType; static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other) { EIGEN_STATIC_ASSERT(OtherRows==Dim, YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES); typedef Block TopLeftLhs; ResultType res(Replicate(T.translation(),1,other.cols())); TopLeftLhs(res, 0, 0, Dim, other.cols()).noalias() += T.linear() * other; return res; } }; /********************************************************** *** Specializations of operator* with lhs EigenBase *** **********************************************************/ // generic HDim x HDim matrix * T => Projective template struct transform_left_product_impl { typedef Transform TransformType; typedef typename TransformType::MatrixType MatrixType; typedef Transform ResultType; static ResultType run(const Other& other,const TransformType& tr) { return ResultType(other * tr.matrix()); } }; // generic HDim x HDim matrix * AffineCompact => Projective template struct transform_left_product_impl { typedef Transform TransformType; typedef typename TransformType::MatrixType MatrixType; typedef Transform ResultType; static ResultType run(const Other& other,const TransformType& tr) { ResultType res; res.matrix().noalias() = other.template block(0,0) * tr.matrix(); res.matrix().col(Dim) += other.col(Dim); return res; } }; // affine matrix * T template struct transform_left_product_impl { typedef Transform TransformType; typedef typename TransformType::MatrixType MatrixType; typedef TransformType ResultType; static ResultType run(const Other& other,const TransformType& tr) { ResultType res; res.affine().noalias() = other * tr.matrix(); res.matrix().row(Dim) = tr.matrix().row(Dim); return res; } }; // affine matrix * AffineCompact template struct transform_left_product_impl { typedef Transform TransformType; typedef typename TransformType::MatrixType MatrixType; typedef TransformType ResultType; static ResultType run(const Other& other,const TransformType& tr) { ResultType res; res.matrix().noalias() = other.template block(0,0) * tr.matrix(); res.translation() += other.col(Dim); return res; } }; // linear matrix * T template struct transform_left_product_impl { typedef Transform TransformType; typedef typename 
TransformType::MatrixType MatrixType; typedef TransformType ResultType; static ResultType run(const Other& other, const TransformType& tr) { TransformType res; if(Mode!=int(AffineCompact)) res.matrix().row(Dim) = tr.matrix().row(Dim); res.matrix().template topRows().noalias() = other * tr.matrix().template topRows(); return res; } }; /********************************************************** *** Specializations of operator* with another Transform *** **********************************************************/ template struct transform_transform_product_impl,Transform,false > { enum { ResultMode = transform_product_result::Mode }; typedef Transform Lhs; typedef Transform Rhs; typedef Transform ResultType; static ResultType run(const Lhs& lhs, const Rhs& rhs) { ResultType res; res.linear() = lhs.linear() * rhs.linear(); res.translation() = lhs.linear() * rhs.translation() + lhs.translation(); res.makeAffine(); return res; } }; template struct transform_transform_product_impl,Transform,true > { typedef Transform Lhs; typedef Transform Rhs; typedef Transform ResultType; static ResultType run(const Lhs& lhs, const Rhs& rhs) { return ResultType( lhs.matrix() * rhs.matrix() ); } }; template struct transform_transform_product_impl,Transform,true > { typedef Transform Lhs; typedef Transform Rhs; typedef Transform ResultType; static ResultType run(const Lhs& lhs, const Rhs& rhs) { ResultType res; res.matrix().template topRows() = lhs.matrix() * rhs.matrix(); res.matrix().row(Dim) = rhs.matrix().row(Dim); return res; } }; template struct transform_transform_product_impl,Transform,true > { typedef Transform Lhs; typedef Transform Rhs; typedef Transform ResultType; static ResultType run(const Lhs& lhs, const Rhs& rhs) { ResultType res(lhs.matrix().template leftCols() * rhs.matrix()); res.matrix().col(Dim) += lhs.matrix().col(Dim); return res; } }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_TRANSFORM_H RcppEigen/inst/include/Eigen/src/Geometry/AlignedBox.h0000644000176200001440000003330012647570720022335 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ALIGNEDBOX_H #define EIGEN_ALIGNEDBOX_H namespace Eigen { /** \geometry_module \ingroup Geometry_Module * * * \class AlignedBox * * \brief An axis aligned box * * \tparam _Scalar the type of the scalar coefficients * \tparam _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic. * * This class represents an axis aligned box as a pair of the minimal and maximal corners. * \warning The result of most methods is undefined when applied to an empty box. You can check for empty boxes using isEmpty(). 
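 *
 * A minimal usage sketch (illustrative only, with hypothetical values):
 * \code
 * Eigen::AlignedBox3d box;                        // default-constructed boxes start empty
 * box.extend(Eigen::Vector3d(0.0, 0.0, 0.0));
 * box.extend(Eigen::Vector3d(1.0, 2.0, 3.0));
 * bool inside = box.contains(Eigen::Vector3d(0.5, 1.0, 1.5));  // true
 * Eigen::Vector3d c = box.center();                            // (0.5, 1.0, 1.5)
 * \endcode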
* \sa alignedboxtypedefs */ template class AlignedBox { public: EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) enum { AmbientDimAtCompileTime = _AmbientDim }; typedef _Scalar Scalar; typedef NumTraits ScalarTraits; typedef DenseIndex Index; typedef typename ScalarTraits::Real RealScalar; typedef typename ScalarTraits::NonInteger NonInteger; typedef Matrix VectorType; /** Define constants to name the corners of a 1D, 2D or 3D axis aligned bounding box */ enum CornerType { /** 1D names @{ */ Min=0, Max=1, /** @} */ /** Identifier for 2D corner @{ */ BottomLeft=0, BottomRight=1, TopLeft=2, TopRight=3, /** @} */ /** Identifier for 3D corner @{ */ BottomLeftFloor=0, BottomRightFloor=1, TopLeftFloor=2, TopRightFloor=3, BottomLeftCeil=4, BottomRightCeil=5, TopLeftCeil=6, TopRightCeil=7 /** @} */ }; /** Default constructor initializing a null box. */ inline AlignedBox() { if (AmbientDimAtCompileTime!=Dynamic) setEmpty(); } /** Constructs a null box with \a _dim the dimension of the ambient space. */ inline explicit AlignedBox(Index _dim) : m_min(_dim), m_max(_dim) { setEmpty(); } /** Constructs a box with extremities \a _min and \a _max. * \warning If either component of \a _min is larger than the same component of \a _max, the constructed box is empty. */ template inline AlignedBox(const OtherVectorType1& _min, const OtherVectorType2& _max) : m_min(_min), m_max(_max) {} /** Constructs a box containing a single point \a p. */ template inline explicit AlignedBox(const MatrixBase& p) : m_min(p), m_max(m_min) { } ~AlignedBox() {} /** \returns the dimension in which the box holds */ inline Index dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size() : Index(AmbientDimAtCompileTime); } /** \deprecated use isEmpty() */ inline bool isNull() const { return isEmpty(); } /** \deprecated use setEmpty() */ inline void setNull() { setEmpty(); } /** \returns true if the box is empty. * \sa setEmpty */ inline bool isEmpty() const { return (m_min.array() > m_max.array()).any(); } /** Makes \c *this an empty box. * \sa isEmpty */ inline void setEmpty() { m_min.setConstant( ScalarTraits::highest() ); m_max.setConstant( ScalarTraits::lowest() ); } /** \returns the minimal corner */ inline const VectorType& (min)() const { return m_min; } /** \returns a non const reference to the minimal corner */ inline VectorType& (min)() { return m_min; } /** \returns the maximal corner */ inline const VectorType& (max)() const { return m_max; } /** \returns a non const reference to the maximal corner */ inline VectorType& (max)() { return m_max; } /** \returns the center of the box */ inline const CwiseUnaryOp, const CwiseBinaryOp, const VectorType, const VectorType> > center() const { return (m_min+m_max)/2; } /** \returns the lengths of the sides of the bounding box. * Note that this function does not get the same * result for integral or floating scalar types: see */ inline const CwiseBinaryOp< internal::scalar_difference_op, const VectorType, const VectorType> sizes() const { return m_max - m_min; } /** \returns the volume of the bounding box */ inline Scalar volume() const { return sizes().prod(); } /** \returns an expression for the bounding box diagonal vector * if the length of the diagonal is needed: diagonal().norm() * will provide it. */ inline CwiseBinaryOp< internal::scalar_difference_op, const VectorType, const VectorType> diagonal() const { return sizes(); } /** \returns the vertex of the bounding box at the corner defined by * the corner-id corner. 
It works only for a 1D, 2D or 3D bounding box. * For 1D bounding boxes corners are named by 2 enum constants: * BottomLeft and BottomRight. * For 2D bounding boxes, corners are named by 4 enum constants: * BottomLeft, BottomRight, TopLeft, TopRight. * For 3D bounding boxes, the following names are added: * BottomLeftCeil, BottomRightCeil, TopLeftCeil, TopRightCeil. */ inline VectorType corner(CornerType corner) const { EIGEN_STATIC_ASSERT(_AmbientDim <= 3, THIS_METHOD_IS_ONLY_FOR_VECTORS_OF_A_SPECIFIC_SIZE); VectorType res; Index mult = 1; for(Index d=0; d(Scalar(0), Scalar(1)); } else r[d] = internal::random(m_min[d], m_max[d]); } return r; } /** \returns true if the point \a p is inside the box \c *this. */ template inline bool contains(const MatrixBase& p) const { typename internal::nested::type p_n(p.derived()); return (m_min.array()<=p_n.array()).all() && (p_n.array()<=m_max.array()).all(); } /** \returns true if the box \a b is entirely inside the box \c *this. */ inline bool contains(const AlignedBox& b) const { return (m_min.array()<=(b.min)().array()).all() && ((b.max)().array()<=m_max.array()).all(); } /** \returns true if the box \a b is intersecting the box \c *this. * \sa intersection, clamp */ inline bool intersects(const AlignedBox& b) const { return (m_min.array()<=(b.max)().array()).all() && ((b.min)().array()<=m_max.array()).all(); } /** Extends \c *this such that it contains the point \a p and returns a reference to \c *this. * \sa extend(const AlignedBox&) */ template inline AlignedBox& extend(const MatrixBase& p) { typename internal::nested::type p_n(p.derived()); m_min = m_min.cwiseMin(p_n); m_max = m_max.cwiseMax(p_n); return *this; } /** Extends \c *this such that it contains the box \a b and returns a reference to \c *this. * \sa merged, extend(const MatrixBase&) */ inline AlignedBox& extend(const AlignedBox& b) { m_min = m_min.cwiseMin(b.m_min); m_max = m_max.cwiseMax(b.m_max); return *this; } /** Clamps \c *this by the box \a b and returns a reference to \c *this. * \note If the boxes don't intersect, the resulting box is empty. * \sa intersection(), intersects() */ inline AlignedBox& clamp(const AlignedBox& b) { m_min = m_min.cwiseMax(b.m_min); m_max = m_max.cwiseMin(b.m_max); return *this; } /** Returns an AlignedBox that is the intersection of \a b and \c *this * \note If the boxes don't intersect, the resulting box is empty. * \sa intersects(), clamp, contains() */ inline AlignedBox intersection(const AlignedBox& b) const {return AlignedBox(m_min.cwiseMax(b.m_min), m_max.cwiseMin(b.m_max)); } /** Returns an AlignedBox that is the union of \a b and \c *this. * \note Merging with an empty box may result in a box bigger than \c *this. * \sa extend(const AlignedBox&) */ inline AlignedBox merged(const AlignedBox& b) const { return AlignedBox(m_min.cwiseMin(b.m_min), m_max.cwiseMax(b.m_max)); } /** Translate \c *this by the vector \a t and returns a reference to \c *this. */ template inline AlignedBox& translate(const MatrixBase& a_t) { const typename internal::nested::type t(a_t.derived()); m_min += t; m_max += t; return *this; } /** \returns the squared distance between the point \a p and the box \c *this, * and zero if \a p is inside the box. * \sa exteriorDistance(const MatrixBase&), squaredExteriorDistance(const AlignedBox&) */ template inline Scalar squaredExteriorDistance(const MatrixBase& p) const; /** \returns the squared distance between the boxes \a b and \c *this, * and zero if the boxes intersect. 
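 *
 * For example (an illustrative sketch with hypothetical values):
 * \code
 * Eigen::AlignedBox2d a(Eigen::Vector2d(0.0, 0.0), Eigen::Vector2d(1.0, 1.0));
 * Eigen::AlignedBox2d b(Eigen::Vector2d(3.0, 0.0), Eigen::Vector2d(4.0, 1.0));
 * double d2 = a.squaredExteriorDistance(b);  // 4.0: the boxes are 2 apart along x
 * double d  = a.exteriorDistance(b);         // 2.0
 * \endcode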
* \sa exteriorDistance(const AlignedBox&), squaredExteriorDistance(const MatrixBase&) */ inline Scalar squaredExteriorDistance(const AlignedBox& b) const; /** \returns the distance between the point \a p and the box \c *this, * and zero if \a p is inside the box. * \sa squaredExteriorDistance(const MatrixBase&), exteriorDistance(const AlignedBox&) */ template inline NonInteger exteriorDistance(const MatrixBase& p) const { using std::sqrt; return sqrt(NonInteger(squaredExteriorDistance(p))); } /** \returns the distance between the boxes \a b and \c *this, * and zero if the boxes intersect. * \sa squaredExteriorDistance(const AlignedBox&), exteriorDistance(const MatrixBase&) */ inline NonInteger exteriorDistance(const AlignedBox& b) const { using std::sqrt; return sqrt(NonInteger(squaredExteriorDistance(b))); } /** \returns \c *this with scalar type casted to \a NewScalarType * * Note that if \a NewScalarType is equal to the current scalar type of \c *this * then this function smartly returns a const reference to \c *this. */ template inline typename internal::cast_return_type >::type cast() const { return typename internal::cast_return_type >::type(*this); } /** Copy constructor with scalar type conversion */ template inline explicit AlignedBox(const AlignedBox& other) { m_min = (other.min)().template cast(); m_max = (other.max)().template cast(); } /** \returns \c true if \c *this is approximately equal to \a other, within the precision * determined by \a prec. * * \sa MatrixBase::isApprox() */ bool isApprox(const AlignedBox& other, const RealScalar& prec = ScalarTraits::dummy_precision()) const { return m_min.isApprox(other.m_min, prec) && m_max.isApprox(other.m_max, prec); } protected: VectorType m_min, m_max; }; template template inline Scalar AlignedBox::squaredExteriorDistance(const MatrixBase& a_p) const { typename internal::nested::type p(a_p.derived()); Scalar dist2(0); Scalar aux; for (Index k=0; k p[k] ) { aux = m_min[k] - p[k]; dist2 += aux*aux; } else if( p[k] > m_max[k] ) { aux = p[k] - m_max[k]; dist2 += aux*aux; } } return dist2; } template inline Scalar AlignedBox::squaredExteriorDistance(const AlignedBox& b) const { Scalar dist2(0); Scalar aux; for (Index k=0; k b.m_max[k] ) { aux = m_min[k] - b.m_max[k]; dist2 += aux*aux; } else if( b.m_min[k] > m_max[k] ) { aux = b.m_min[k] - m_max[k]; dist2 += aux*aux; } } return dist2; } /** \defgroup alignedboxtypedefs Global aligned box typedefs * * \ingroup Geometry_Module * * Eigen defines several typedef shortcuts for most common aligned box types. * * The general patterns are the following: * * \c AlignedBoxSizeType where \c Size can be \c 1, \c 2,\c 3,\c 4 for fixed size boxes or \c X for dynamic size, * and where \c Type can be \c i for integer, \c f for float, \c d for double. * * For example, \c AlignedBox3d is a fixed-size 3x3 aligned box type of doubles, and \c AlignedBoxXf is a dynamic-size aligned box of floats. 
* * \sa class AlignedBox */ #define EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix) \ /** \ingroup alignedboxtypedefs */ \ typedef AlignedBox AlignedBox##SizeSuffix##TypeSuffix; #define EIGEN_MAKE_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \ EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 1, 1) \ EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 2, 2) \ EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 3, 3) \ EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 4, 4) \ EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Dynamic, X) EIGEN_MAKE_TYPEDEFS_ALL_SIZES(int, i) EIGEN_MAKE_TYPEDEFS_ALL_SIZES(float, f) EIGEN_MAKE_TYPEDEFS_ALL_SIZES(double, d) #undef EIGEN_MAKE_TYPEDEFS_ALL_SIZES #undef EIGEN_MAKE_TYPEDEFS } // end namespace Eigen #endif // EIGEN_ALIGNEDBOX_H RcppEigen/inst/include/Eigen/src/Geometry/AngleAxis.h0000644000176200001440000001670512647570720022206 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ANGLEAXIS_H #define EIGEN_ANGLEAXIS_H namespace Eigen { /** \geometry_module \ingroup Geometry_Module * * \class AngleAxis * * \brief Represents a 3D rotation as a rotation angle around an arbitrary 3D axis * * \param _Scalar the scalar type, i.e., the type of the coefficients. * * \warning When setting up an AngleAxis object, the axis vector \b must \b be \b normalized. * * The following two typedefs are provided for convenience: * \li \c AngleAxisf for \c float * \li \c AngleAxisd for \c double * * Combined with MatrixBase::Unit{X,Y,Z}, AngleAxis can be used to easily * mimic Euler-angles. Here is an example: * \include AngleAxis_mimic_euler.cpp * Output: \verbinclude AngleAxis_mimic_euler.out * * \note This class is not aimed to be used to store a rotation transformation, * but rather to make easier the creation of other rotation (Quaternion, rotation Matrix) * and transformation objects. * * \sa class Quaternion, class Transform, MatrixBase::UnitX() */ namespace internal { template struct traits > { typedef _Scalar Scalar; }; } template class AngleAxis : public RotationBase,3> { typedef RotationBase,3> Base; public: using Base::operator*; enum { Dim = 3 }; /** the scalar type of the coefficients */ typedef _Scalar Scalar; typedef Matrix Matrix3; typedef Matrix Vector3; typedef Quaternion QuaternionType; protected: Vector3 m_axis; Scalar m_angle; public: /** Default constructor without initialization. */ AngleAxis() {} /** Constructs and initialize the angle-axis rotation from an \a angle in radian * and an \a axis which \b must \b be \b normalized. * * \warning If the \a axis vector is not normalized, then the angle-axis object * represents an invalid rotation. */ template inline AngleAxis(const Scalar& angle, const MatrixBase& axis) : m_axis(axis), m_angle(angle) {} /** Constructs and initialize the angle-axis rotation from a quaternion \a q. */ template inline explicit AngleAxis(const QuaternionBase& q) { *this = q; } /** Constructs and initialize the angle-axis rotation from a 3x3 rotation matrix. 
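 *
 * For example (an illustrative round trip with hypothetical values):
 * \code
 * Eigen::AngleAxisd aa(1.0, Eigen::Vector3d::UnitZ());  // 1 radian about z; the axis must be normalized
 * Eigen::Matrix3d R = aa.toRotationMatrix();
 * Eigen::AngleAxisd back(R);                            // recovers the angle and axis from the matrix
 * Eigen::Quaterniond q(aa);                             // an AngleAxis also converts to a quaternion
 * \endcode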
*/ template inline explicit AngleAxis(const MatrixBase& m) { *this = m; } Scalar angle() const { return m_angle; } Scalar& angle() { return m_angle; } const Vector3& axis() const { return m_axis; } Vector3& axis() { return m_axis; } /** Concatenates two rotations */ inline QuaternionType operator* (const AngleAxis& other) const { return QuaternionType(*this) * QuaternionType(other); } /** Concatenates two rotations */ inline QuaternionType operator* (const QuaternionType& other) const { return QuaternionType(*this) * other; } /** Concatenates two rotations */ friend inline QuaternionType operator* (const QuaternionType& a, const AngleAxis& b) { return a * QuaternionType(b); } /** \returns the inverse rotation, i.e., an angle-axis with opposite rotation angle */ AngleAxis inverse() const { return AngleAxis(-m_angle, m_axis); } template AngleAxis& operator=(const QuaternionBase& q); template AngleAxis& operator=(const MatrixBase& m); template AngleAxis& fromRotationMatrix(const MatrixBase& m); Matrix3 toRotationMatrix(void) const; /** \returns \c *this with scalar type casted to \a NewScalarType * * Note that if \a NewScalarType is equal to the current scalar type of \c *this * then this function smartly returns a const reference to \c *this. */ template inline typename internal::cast_return_type >::type cast() const { return typename internal::cast_return_type >::type(*this); } /** Copy constructor with scalar type conversion */ template inline explicit AngleAxis(const AngleAxis& other) { m_axis = other.axis().template cast(); m_angle = Scalar(other.angle()); } static inline const AngleAxis Identity() { return AngleAxis(Scalar(0), Vector3::UnitX()); } /** \returns \c true if \c *this is approximately equal to \a other, within the precision * determined by \a prec. * * \sa MatrixBase::isApprox() */ bool isApprox(const AngleAxis& other, const typename NumTraits::Real& prec = NumTraits::dummy_precision()) const { return m_axis.isApprox(other.m_axis, prec) && internal::isApprox(m_angle,other.m_angle, prec); } }; /** \ingroup Geometry_Module * single precision angle-axis type */ typedef AngleAxis AngleAxisf; /** \ingroup Geometry_Module * double precision angle-axis type */ typedef AngleAxis AngleAxisd; /** Set \c *this from a \b unit quaternion. * The axis is normalized. * * \warning As any other method dealing with quaternion, if the input quaternion * is not normalized then the result is undefined. */ template template AngleAxis& AngleAxis::operator=(const QuaternionBase& q) { using std::acos; using std::min; using std::max; using std::sqrt; Scalar n2 = q.vec().squaredNorm(); if (n2 < NumTraits::dummy_precision()*NumTraits::dummy_precision()) { m_angle = Scalar(0); m_axis << Scalar(1), Scalar(0), Scalar(0); } else { m_angle = Scalar(2)*acos((min)((max)(Scalar(-1),q.w()),Scalar(1))); m_axis = q.vec() / sqrt(n2); } return *this; } /** Set \c *this from a 3x3 rotation matrix \a mat. */ template template AngleAxis& AngleAxis::operator=(const MatrixBase& mat) { // Since a direct conversion would not be really faster, // let's use the robust Quaternion implementation: return *this = QuaternionType(mat); } /** * \brief Sets \c *this from a 3x3 rotation matrix. **/ template template AngleAxis& AngleAxis::fromRotationMatrix(const MatrixBase& mat) { return *this = QuaternionType(mat); } /** Constructs and \returns an equivalent 3x3 rotation matrix. 
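 *
 * (Added note.) The matrix assembled below follows Rodrigues' rotation formula,
 * \f$ R = \cos\theta\, I + \sin\theta\, [u]_\times + (1-\cos\theta)\, u u^T \f$,
 * where \f$ u \f$ is the (unit) axis, \f$ \theta \f$ the angle and
 * \f$ [u]_\times \f$ the cross-product matrix of \f$ u \f$.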
*/ template typename AngleAxis::Matrix3 AngleAxis::toRotationMatrix(void) const { using std::sin; using std::cos; Matrix3 res; Vector3 sin_axis = sin(m_angle) * m_axis; Scalar c = cos(m_angle); Vector3 cos1_axis = (Scalar(1)-c) * m_axis; Scalar tmp; tmp = cos1_axis.x() * m_axis.y(); res.coeffRef(0,1) = tmp - sin_axis.z(); res.coeffRef(1,0) = tmp + sin_axis.z(); tmp = cos1_axis.x() * m_axis.z(); res.coeffRef(0,2) = tmp + sin_axis.y(); res.coeffRef(2,0) = tmp - sin_axis.y(); tmp = cos1_axis.y() * m_axis.z(); res.coeffRef(1,2) = tmp - sin_axis.x(); res.coeffRef(2,1) = tmp + sin_axis.x(); res.diagonal() = (cos1_axis.cwiseProduct(m_axis)).array() + c; return res; } } // end namespace Eigen #endif // EIGEN_ANGLEAXIS_H RcppEigen/inst/include/Eigen/src/LU/0000755000176200001440000000000012550462273016672 5ustar liggesusersRcppEigen/inst/include/Eigen/src/LU/arch/0000755000176200001440000000000012253717461017611 5ustar liggesusersRcppEigen/inst/include/Eigen/src/LU/arch/Inverse_SSE.h0000644000176200001440000003144412253717461022115 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2001 Intel Corporation // Copyright (C) 2010 Gael Guennebaud // Copyright (C) 2009 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. // The SSE code for the 4x4 float and double matrix inverse in this file // comes from the following Intel's library: // http://software.intel.com/en-us/articles/optimized-matrix-library-for-use-with-the-intel-pentiumr-4-processors-sse2-instructions/ // // Here is the respective copyright and license statement: // // Copyright (c) 2001 Intel Corporation. // // Permition is granted to use, copy, distribute and prepare derivative works // of this library for any purpose and without fee, provided, that the above // copyright notice and this statement appear in all copies. // Intel makes no representations about the suitability of this software for // any purpose, and specifically disclaims all warranties. // See LEGAL.TXT for all the legal information. #ifndef EIGEN_INVERSE_SSE_H #define EIGEN_INVERSE_SSE_H namespace Eigen { namespace internal { template struct compute_inverse_size4 { enum { MatrixAlignment = bool(MatrixType::Flags&AlignedBit), ResultAlignment = bool(ResultType::Flags&AlignedBit), StorageOrdersMatch = (MatrixType::Flags&RowMajorBit) == (ResultType::Flags&RowMajorBit) }; static void run(const MatrixType& matrix, ResultType& result) { EIGEN_ALIGN16 const unsigned int _Sign_PNNP[4] = { 0x00000000, 0x80000000, 0x80000000, 0x00000000 }; // Load the full matrix into registers __m128 _L1 = matrix.template packet( 0); __m128 _L2 = matrix.template packet( 4); __m128 _L3 = matrix.template packet( 8); __m128 _L4 = matrix.template packet(12); // The inverse is calculated using "Divide and Conquer" technique. The // original matrix is divide into four 2x2 sub-matrices. Since each // register holds four matrix element, the smaller matrices are // represented as a registers. Hence we get a better locality of the // calculations. 
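    // (Added notes, paraphrasing the step-by-step comments below.) With the 4x4 matrix
    // partitioned into 2x2 blocks as M = [A B; C D], and X# denoting the adjugate of a
    // 2x2 block X, the code evaluates
    //   det(M) = |A|*|D| + |B|*|C| - trace(A#*B * D#*C)
    // and assembles the inverse from the block products A#*B and D#*C together with the
    // block determinants, all kept in SSE registers; the final shuffles write the result
    // back in the requested storage order.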
__m128 A, B, C, D; // the four sub-matrices if(!StorageOrdersMatch) { A = _mm_unpacklo_ps(_L1, _L2); B = _mm_unpacklo_ps(_L3, _L4); C = _mm_unpackhi_ps(_L1, _L2); D = _mm_unpackhi_ps(_L3, _L4); } else { A = _mm_movelh_ps(_L1, _L2); B = _mm_movehl_ps(_L2, _L1); C = _mm_movelh_ps(_L3, _L4); D = _mm_movehl_ps(_L4, _L3); } __m128 iA, iB, iC, iD, // partial inverse of the sub-matrices DC, AB; __m128 dA, dB, dC, dD; // determinant of the sub-matrices __m128 det, d, d1, d2; __m128 rd; // reciprocal of the determinant // AB = A# * B AB = _mm_mul_ps(_mm_shuffle_ps(A,A,0x0F), B); AB = _mm_sub_ps(AB,_mm_mul_ps(_mm_shuffle_ps(A,A,0xA5), _mm_shuffle_ps(B,B,0x4E))); // DC = D# * C DC = _mm_mul_ps(_mm_shuffle_ps(D,D,0x0F), C); DC = _mm_sub_ps(DC,_mm_mul_ps(_mm_shuffle_ps(D,D,0xA5), _mm_shuffle_ps(C,C,0x4E))); // dA = |A| dA = _mm_mul_ps(_mm_shuffle_ps(A, A, 0x5F),A); dA = _mm_sub_ss(dA, _mm_movehl_ps(dA,dA)); // dB = |B| dB = _mm_mul_ps(_mm_shuffle_ps(B, B, 0x5F),B); dB = _mm_sub_ss(dB, _mm_movehl_ps(dB,dB)); // dC = |C| dC = _mm_mul_ps(_mm_shuffle_ps(C, C, 0x5F),C); dC = _mm_sub_ss(dC, _mm_movehl_ps(dC,dC)); // dD = |D| dD = _mm_mul_ps(_mm_shuffle_ps(D, D, 0x5F),D); dD = _mm_sub_ss(dD, _mm_movehl_ps(dD,dD)); // d = trace(AB*DC) = trace(A#*B*D#*C) d = _mm_mul_ps(_mm_shuffle_ps(DC,DC,0xD8),AB); // iD = C*A#*B iD = _mm_mul_ps(_mm_shuffle_ps(C,C,0xA0), _mm_movelh_ps(AB,AB)); iD = _mm_add_ps(iD,_mm_mul_ps(_mm_shuffle_ps(C,C,0xF5), _mm_movehl_ps(AB,AB))); // iA = B*D#*C iA = _mm_mul_ps(_mm_shuffle_ps(B,B,0xA0), _mm_movelh_ps(DC,DC)); iA = _mm_add_ps(iA,_mm_mul_ps(_mm_shuffle_ps(B,B,0xF5), _mm_movehl_ps(DC,DC))); // d = trace(AB*DC) = trace(A#*B*D#*C) [continue] d = _mm_add_ps(d, _mm_movehl_ps(d, d)); d = _mm_add_ss(d, _mm_shuffle_ps(d, d, 1)); d1 = _mm_mul_ss(dA,dD); d2 = _mm_mul_ss(dB,dC); // iD = D*|A| - C*A#*B iD = _mm_sub_ps(_mm_mul_ps(D,_mm_shuffle_ps(dA,dA,0)), iD); // iA = A*|D| - B*D#*C; iA = _mm_sub_ps(_mm_mul_ps(A,_mm_shuffle_ps(dD,dD,0)), iA); // det = |A|*|D| + |B|*|C| - trace(A#*B*D#*C) det = _mm_sub_ss(_mm_add_ss(d1,d2),d); rd = _mm_div_ss(_mm_set_ss(1.0f), det); // #ifdef ZERO_SINGULAR // rd = _mm_and_ps(_mm_cmpneq_ss(det,_mm_setzero_ps()), rd); // #endif // iB = D * (A#B)# = D*B#*A iB = _mm_mul_ps(D, _mm_shuffle_ps(AB,AB,0x33)); iB = _mm_sub_ps(iB, _mm_mul_ps(_mm_shuffle_ps(D,D,0xB1), _mm_shuffle_ps(AB,AB,0x66))); // iC = A * (D#C)# = A*C#*D iC = _mm_mul_ps(A, _mm_shuffle_ps(DC,DC,0x33)); iC = _mm_sub_ps(iC, _mm_mul_ps(_mm_shuffle_ps(A,A,0xB1), _mm_shuffle_ps(DC,DC,0x66))); rd = _mm_shuffle_ps(rd,rd,0); rd = _mm_xor_ps(rd, _mm_load_ps((float*)_Sign_PNNP)); // iB = C*|B| - D*B#*A iB = _mm_sub_ps(_mm_mul_ps(C,_mm_shuffle_ps(dB,dB,0)), iB); // iC = B*|C| - A*C#*D; iC = _mm_sub_ps(_mm_mul_ps(B,_mm_shuffle_ps(dC,dC,0)), iC); // iX = iX / det iA = _mm_mul_ps(rd,iA); iB = _mm_mul_ps(rd,iB); iC = _mm_mul_ps(rd,iC); iD = _mm_mul_ps(rd,iD); result.template writePacket( 0, _mm_shuffle_ps(iA,iB,0x77)); result.template writePacket( 4, _mm_shuffle_ps(iA,iB,0x22)); result.template writePacket( 8, _mm_shuffle_ps(iC,iD,0x77)); result.template writePacket(12, _mm_shuffle_ps(iC,iD,0x22)); } }; template struct compute_inverse_size4 { enum { MatrixAlignment = bool(MatrixType::Flags&AlignedBit), ResultAlignment = bool(ResultType::Flags&AlignedBit), StorageOrdersMatch = (MatrixType::Flags&RowMajorBit) == (ResultType::Flags&RowMajorBit) }; static void run(const MatrixType& matrix, ResultType& result) { const __m128d _Sign_NP = _mm_castsi128_pd(_mm_set_epi32(0x0,0x0,0x80000000,0x0)); const __m128d _Sign_PN = 
_mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0)); // The inverse is calculated using "Divide and Conquer" technique. The // original matrix is divide into four 2x2 sub-matrices. Since each // register of the matrix holds two element, the smaller matrices are // consisted of two registers. Hence we get a better locality of the // calculations. // the four sub-matrices __m128d A1, A2, B1, B2, C1, C2, D1, D2; if(StorageOrdersMatch) { A1 = matrix.template packet( 0); B1 = matrix.template packet( 2); A2 = matrix.template packet( 4); B2 = matrix.template packet( 6); C1 = matrix.template packet( 8); D1 = matrix.template packet(10); C2 = matrix.template packet(12); D2 = matrix.template packet(14); } else { __m128d tmp; A1 = matrix.template packet( 0); C1 = matrix.template packet( 2); A2 = matrix.template packet( 4); C2 = matrix.template packet( 6); tmp = A1; A1 = _mm_unpacklo_pd(A1,A2); A2 = _mm_unpackhi_pd(tmp,A2); tmp = C1; C1 = _mm_unpacklo_pd(C1,C2); C2 = _mm_unpackhi_pd(tmp,C2); B1 = matrix.template packet( 8); D1 = matrix.template packet(10); B2 = matrix.template packet(12); D2 = matrix.template packet(14); tmp = B1; B1 = _mm_unpacklo_pd(B1,B2); B2 = _mm_unpackhi_pd(tmp,B2); tmp = D1; D1 = _mm_unpacklo_pd(D1,D2); D2 = _mm_unpackhi_pd(tmp,D2); } __m128d iA1, iA2, iB1, iB2, iC1, iC2, iD1, iD2, // partial invese of the sub-matrices DC1, DC2, AB1, AB2; __m128d dA, dB, dC, dD; // determinant of the sub-matrices __m128d det, d1, d2, rd; // dA = |A| dA = _mm_shuffle_pd(A2, A2, 1); dA = _mm_mul_pd(A1, dA); dA = _mm_sub_sd(dA, _mm_shuffle_pd(dA,dA,3)); // dB = |B| dB = _mm_shuffle_pd(B2, B2, 1); dB = _mm_mul_pd(B1, dB); dB = _mm_sub_sd(dB, _mm_shuffle_pd(dB,dB,3)); // AB = A# * B AB1 = _mm_mul_pd(B1, _mm_shuffle_pd(A2,A2,3)); AB2 = _mm_mul_pd(B2, _mm_shuffle_pd(A1,A1,0)); AB1 = _mm_sub_pd(AB1, _mm_mul_pd(B2, _mm_shuffle_pd(A1,A1,3))); AB2 = _mm_sub_pd(AB2, _mm_mul_pd(B1, _mm_shuffle_pd(A2,A2,0))); // dC = |C| dC = _mm_shuffle_pd(C2, C2, 1); dC = _mm_mul_pd(C1, dC); dC = _mm_sub_sd(dC, _mm_shuffle_pd(dC,dC,3)); // dD = |D| dD = _mm_shuffle_pd(D2, D2, 1); dD = _mm_mul_pd(D1, dD); dD = _mm_sub_sd(dD, _mm_shuffle_pd(dD,dD,3)); // DC = D# * C DC1 = _mm_mul_pd(C1, _mm_shuffle_pd(D2,D2,3)); DC2 = _mm_mul_pd(C2, _mm_shuffle_pd(D1,D1,0)); DC1 = _mm_sub_pd(DC1, _mm_mul_pd(C2, _mm_shuffle_pd(D1,D1,3))); DC2 = _mm_sub_pd(DC2, _mm_mul_pd(C1, _mm_shuffle_pd(D2,D2,0))); // rd = trace(AB*DC) = trace(A#*B*D#*C) d1 = _mm_mul_pd(AB1, _mm_shuffle_pd(DC1, DC2, 0)); d2 = _mm_mul_pd(AB2, _mm_shuffle_pd(DC1, DC2, 3)); rd = _mm_add_pd(d1, d2); rd = _mm_add_sd(rd, _mm_shuffle_pd(rd, rd,3)); // iD = C*A#*B iD1 = _mm_mul_pd(AB1, _mm_shuffle_pd(C1,C1,0)); iD2 = _mm_mul_pd(AB1, _mm_shuffle_pd(C2,C2,0)); iD1 = _mm_add_pd(iD1, _mm_mul_pd(AB2, _mm_shuffle_pd(C1,C1,3))); iD2 = _mm_add_pd(iD2, _mm_mul_pd(AB2, _mm_shuffle_pd(C2,C2,3))); // iA = B*D#*C iA1 = _mm_mul_pd(DC1, _mm_shuffle_pd(B1,B1,0)); iA2 = _mm_mul_pd(DC1, _mm_shuffle_pd(B2,B2,0)); iA1 = _mm_add_pd(iA1, _mm_mul_pd(DC2, _mm_shuffle_pd(B1,B1,3))); iA2 = _mm_add_pd(iA2, _mm_mul_pd(DC2, _mm_shuffle_pd(B2,B2,3))); // iD = D*|A| - C*A#*B dA = _mm_shuffle_pd(dA,dA,0); iD1 = _mm_sub_pd(_mm_mul_pd(D1, dA), iD1); iD2 = _mm_sub_pd(_mm_mul_pd(D2, dA), iD2); // iA = A*|D| - B*D#*C; dD = _mm_shuffle_pd(dD,dD,0); iA1 = _mm_sub_pd(_mm_mul_pd(A1, dD), iA1); iA2 = _mm_sub_pd(_mm_mul_pd(A2, dD), iA2); d1 = _mm_mul_sd(dA, dD); d2 = _mm_mul_sd(dB, dC); // iB = D * (A#B)# = D*B#*A iB1 = _mm_mul_pd(D1, _mm_shuffle_pd(AB2,AB1,1)); iB2 = _mm_mul_pd(D2, _mm_shuffle_pd(AB2,AB1,1)); iB1 = 
_mm_sub_pd(iB1, _mm_mul_pd(_mm_shuffle_pd(D1,D1,1), _mm_shuffle_pd(AB2,AB1,2))); iB2 = _mm_sub_pd(iB2, _mm_mul_pd(_mm_shuffle_pd(D2,D2,1), _mm_shuffle_pd(AB2,AB1,2))); // det = |A|*|D| + |B|*|C| - trace(A#*B*D#*C) det = _mm_add_sd(d1, d2); det = _mm_sub_sd(det, rd); // iC = A * (D#C)# = A*C#*D iC1 = _mm_mul_pd(A1, _mm_shuffle_pd(DC2,DC1,1)); iC2 = _mm_mul_pd(A2, _mm_shuffle_pd(DC2,DC1,1)); iC1 = _mm_sub_pd(iC1, _mm_mul_pd(_mm_shuffle_pd(A1,A1,1), _mm_shuffle_pd(DC2,DC1,2))); iC2 = _mm_sub_pd(iC2, _mm_mul_pd(_mm_shuffle_pd(A2,A2,1), _mm_shuffle_pd(DC2,DC1,2))); rd = _mm_div_sd(_mm_set_sd(1.0), det); // #ifdef ZERO_SINGULAR // rd = _mm_and_pd(_mm_cmpneq_sd(det,_mm_setzero_pd()), rd); // #endif rd = _mm_shuffle_pd(rd,rd,0); // iB = C*|B| - D*B#*A dB = _mm_shuffle_pd(dB,dB,0); iB1 = _mm_sub_pd(_mm_mul_pd(C1, dB), iB1); iB2 = _mm_sub_pd(_mm_mul_pd(C2, dB), iB2); d1 = _mm_xor_pd(rd, _Sign_PN); d2 = _mm_xor_pd(rd, _Sign_NP); // iC = B*|C| - A*C#*D; dC = _mm_shuffle_pd(dC,dC,0); iC1 = _mm_sub_pd(_mm_mul_pd(B1, dC), iC1); iC2 = _mm_sub_pd(_mm_mul_pd(B2, dC), iC2); result.template writePacket( 0, _mm_mul_pd(_mm_shuffle_pd(iA2, iA1, 3), d1)); // iA# / det result.template writePacket( 4, _mm_mul_pd(_mm_shuffle_pd(iA2, iA1, 0), d2)); result.template writePacket( 2, _mm_mul_pd(_mm_shuffle_pd(iB2, iB1, 3), d1)); // iB# / det result.template writePacket( 6, _mm_mul_pd(_mm_shuffle_pd(iB2, iB1, 0), d2)); result.template writePacket( 8, _mm_mul_pd(_mm_shuffle_pd(iC2, iC1, 3), d1)); // iC# / det result.template writePacket(12, _mm_mul_pd(_mm_shuffle_pd(iC2, iC1, 0), d2)); result.template writePacket(10, _mm_mul_pd(_mm_shuffle_pd(iD2, iD1, 3), d1)); // iD# / det result.template writePacket(14, _mm_mul_pd(_mm_shuffle_pd(iD2, iD1, 0), d2)); } }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_INVERSE_SSE_H RcppEigen/inst/include/Eigen/src/LU/Inverse.h0000644000176200001440000003416212253717461020466 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2010 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_INVERSE_H #define EIGEN_INVERSE_H namespace Eigen { namespace internal { /********************************** *** General case implementation *** **********************************/ template struct compute_inverse { static inline void run(const MatrixType& matrix, ResultType& result) { result = matrix.partialPivLu().inverse(); } }; template struct compute_inverse_and_det_with_check { /* nothing! general case not supported. 
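       (This specialization is intentionally left empty: computeInverseAndDetWithCheck()
       is only implemented for fixed sizes up to 4x4, as noted in its documentation near
       the end of this file; for the general case, use FullPivLU and query the
       determinant separately.)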
*/ }; /**************************** *** Size 1 implementation *** ****************************/ template struct compute_inverse { static inline void run(const MatrixType& matrix, ResultType& result) { typedef typename MatrixType::Scalar Scalar; result.coeffRef(0,0) = Scalar(1) / matrix.coeff(0,0); } }; template struct compute_inverse_and_det_with_check { static inline void run( const MatrixType& matrix, const typename MatrixType::RealScalar& absDeterminantThreshold, ResultType& result, typename ResultType::Scalar& determinant, bool& invertible ) { using std::abs; determinant = matrix.coeff(0,0); invertible = abs(determinant) > absDeterminantThreshold; if(invertible) result.coeffRef(0,0) = typename ResultType::Scalar(1) / determinant; } }; /**************************** *** Size 2 implementation *** ****************************/ template inline void compute_inverse_size2_helper( const MatrixType& matrix, const typename ResultType::Scalar& invdet, ResultType& result) { result.coeffRef(0,0) = matrix.coeff(1,1) * invdet; result.coeffRef(1,0) = -matrix.coeff(1,0) * invdet; result.coeffRef(0,1) = -matrix.coeff(0,1) * invdet; result.coeffRef(1,1) = matrix.coeff(0,0) * invdet; } template struct compute_inverse { static inline void run(const MatrixType& matrix, ResultType& result) { typedef typename ResultType::Scalar Scalar; const Scalar invdet = typename MatrixType::Scalar(1) / matrix.determinant(); compute_inverse_size2_helper(matrix, invdet, result); } }; template struct compute_inverse_and_det_with_check { static inline void run( const MatrixType& matrix, const typename MatrixType::RealScalar& absDeterminantThreshold, ResultType& inverse, typename ResultType::Scalar& determinant, bool& invertible ) { using std::abs; typedef typename ResultType::Scalar Scalar; determinant = matrix.determinant(); invertible = abs(determinant) > absDeterminantThreshold; if(!invertible) return; const Scalar invdet = Scalar(1) / determinant; compute_inverse_size2_helper(matrix, invdet, inverse); } }; /**************************** *** Size 3 implementation *** ****************************/ template inline typename MatrixType::Scalar cofactor_3x3(const MatrixType& m) { enum { i1 = (i+1) % 3, i2 = (i+2) % 3, j1 = (j+1) % 3, j2 = (j+2) % 3 }; return m.coeff(i1, j1) * m.coeff(i2, j2) - m.coeff(i1, j2) * m.coeff(i2, j1); } template inline void compute_inverse_size3_helper( const MatrixType& matrix, const typename ResultType::Scalar& invdet, const Matrix& cofactors_col0, ResultType& result) { result.row(0) = cofactors_col0 * invdet; result.coeffRef(1,0) = cofactor_3x3(matrix) * invdet; result.coeffRef(1,1) = cofactor_3x3(matrix) * invdet; result.coeffRef(1,2) = cofactor_3x3(matrix) * invdet; result.coeffRef(2,0) = cofactor_3x3(matrix) * invdet; result.coeffRef(2,1) = cofactor_3x3(matrix) * invdet; result.coeffRef(2,2) = cofactor_3x3(matrix) * invdet; } template struct compute_inverse { static inline void run(const MatrixType& matrix, ResultType& result) { typedef typename ResultType::Scalar Scalar; Matrix cofactors_col0; cofactors_col0.coeffRef(0) = cofactor_3x3(matrix); cofactors_col0.coeffRef(1) = cofactor_3x3(matrix); cofactors_col0.coeffRef(2) = cofactor_3x3(matrix); const Scalar det = (cofactors_col0.cwiseProduct(matrix.col(0))).sum(); const Scalar invdet = Scalar(1) / det; compute_inverse_size3_helper(matrix, invdet, cofactors_col0, result); } }; template struct compute_inverse_and_det_with_check { static inline void run( const MatrixType& matrix, const typename MatrixType::RealScalar& absDeterminantThreshold, 
ResultType& inverse, typename ResultType::Scalar& determinant, bool& invertible ) { using std::abs; typedef typename ResultType::Scalar Scalar; Matrix cofactors_col0; cofactors_col0.coeffRef(0) = cofactor_3x3(matrix); cofactors_col0.coeffRef(1) = cofactor_3x3(matrix); cofactors_col0.coeffRef(2) = cofactor_3x3(matrix); determinant = (cofactors_col0.cwiseProduct(matrix.col(0))).sum(); invertible = abs(determinant) > absDeterminantThreshold; if(!invertible) return; const Scalar invdet = Scalar(1) / determinant; compute_inverse_size3_helper(matrix, invdet, cofactors_col0, inverse); } }; /**************************** *** Size 4 implementation *** ****************************/ template inline const typename Derived::Scalar general_det3_helper (const MatrixBase& matrix, int i1, int i2, int i3, int j1, int j2, int j3) { return matrix.coeff(i1,j1) * (matrix.coeff(i2,j2) * matrix.coeff(i3,j3) - matrix.coeff(i2,j3) * matrix.coeff(i3,j2)); } template inline typename MatrixType::Scalar cofactor_4x4(const MatrixType& matrix) { enum { i1 = (i+1) % 4, i2 = (i+2) % 4, i3 = (i+3) % 4, j1 = (j+1) % 4, j2 = (j+2) % 4, j3 = (j+3) % 4 }; return general_det3_helper(matrix, i1, i2, i3, j1, j2, j3) + general_det3_helper(matrix, i2, i3, i1, j1, j2, j3) + general_det3_helper(matrix, i3, i1, i2, j1, j2, j3); } template struct compute_inverse_size4 { static void run(const MatrixType& matrix, ResultType& result) { result.coeffRef(0,0) = cofactor_4x4(matrix); result.coeffRef(1,0) = -cofactor_4x4(matrix); result.coeffRef(2,0) = cofactor_4x4(matrix); result.coeffRef(3,0) = -cofactor_4x4(matrix); result.coeffRef(0,2) = cofactor_4x4(matrix); result.coeffRef(1,2) = -cofactor_4x4(matrix); result.coeffRef(2,2) = cofactor_4x4(matrix); result.coeffRef(3,2) = -cofactor_4x4(matrix); result.coeffRef(0,1) = -cofactor_4x4(matrix); result.coeffRef(1,1) = cofactor_4x4(matrix); result.coeffRef(2,1) = -cofactor_4x4(matrix); result.coeffRef(3,1) = cofactor_4x4(matrix); result.coeffRef(0,3) = -cofactor_4x4(matrix); result.coeffRef(1,3) = cofactor_4x4(matrix); result.coeffRef(2,3) = -cofactor_4x4(matrix); result.coeffRef(3,3) = cofactor_4x4(matrix); result /= (matrix.col(0).cwiseProduct(result.row(0).transpose())).sum(); } }; template struct compute_inverse : compute_inverse_size4 { }; template struct compute_inverse_and_det_with_check { static inline void run( const MatrixType& matrix, const typename MatrixType::RealScalar& absDeterminantThreshold, ResultType& inverse, typename ResultType::Scalar& determinant, bool& invertible ) { using std::abs; determinant = matrix.determinant(); invertible = abs(determinant) > absDeterminantThreshold; if(invertible) compute_inverse::run(matrix, inverse); } }; /************************* *** MatrixBase methods *** *************************/ template struct traits > { typedef typename MatrixType::PlainObject ReturnType; }; template struct inverse_impl : public ReturnByValue > { typedef typename MatrixType::Index Index; typedef typename internal::eval::type MatrixTypeNested; typedef typename remove_all::type MatrixTypeNestedCleaned; MatrixTypeNested m_matrix; inverse_impl(const MatrixType& matrix) : m_matrix(matrix) {} inline Index rows() const { return m_matrix.rows(); } inline Index cols() const { return m_matrix.cols(); } template inline void evalTo(Dest& dst) const { const int Size = EIGEN_PLAIN_ENUM_MIN(MatrixType::ColsAtCompileTime,Dest::ColsAtCompileTime); EIGEN_ONLY_USED_FOR_DEBUG(Size); eigen_assert(( (Size<=1) || (Size>4) || (extract_data(m_matrix)!=extract_data(dst))) && "Aliasing problem 
detected in inverse(), you need to do inverse().eval() here."); compute_inverse::run(m_matrix, dst); } }; } // end namespace internal /** \lu_module * * \returns the matrix inverse of this matrix. * * For small fixed sizes up to 4x4, this method uses cofactors. * In the general case, this method uses class PartialPivLU. * * \note This matrix must be invertible, otherwise the result is undefined. If you need an * invertibility check, do the following: * \li for fixed sizes up to 4x4, use computeInverseAndDetWithCheck(). * \li for the general case, use class FullPivLU. * * Example: \include MatrixBase_inverse.cpp * Output: \verbinclude MatrixBase_inverse.out * * \sa computeInverseAndDetWithCheck() */ template inline const internal::inverse_impl MatrixBase::inverse() const { EIGEN_STATIC_ASSERT(!NumTraits::IsInteger,THIS_FUNCTION_IS_NOT_FOR_INTEGER_NUMERIC_TYPES) eigen_assert(rows() == cols()); return internal::inverse_impl(derived()); } /** \lu_module * * Computation of matrix inverse and determinant, with invertibility check. * * This is only for fixed-size square matrices of size up to 4x4. * * \param inverse Reference to the matrix in which to store the inverse. * \param determinant Reference to the variable in which to store the determinant. * \param invertible Reference to the bool variable in which to store whether the matrix is invertible. * \param absDeterminantThreshold Optional parameter controlling the invertibility check. * The matrix will be declared invertible if the absolute value of its * determinant is greater than this threshold. * * Example: \include MatrixBase_computeInverseAndDetWithCheck.cpp * Output: \verbinclude MatrixBase_computeInverseAndDetWithCheck.out * * \sa inverse(), computeInverseWithCheck() */ template template inline void MatrixBase::computeInverseAndDetWithCheck( ResultType& inverse, typename ResultType::Scalar& determinant, bool& invertible, const RealScalar& absDeterminantThreshold ) const { // i'd love to put some static assertions there, but SFINAE means that they have no effect... eigen_assert(rows() == cols()); // for 2x2, it's worth giving a chance to avoid evaluating. // for larger sizes, evaluating has negligible cost and limits code size. typedef typename internal::conditional< RowsAtCompileTime == 2, typename internal::remove_all::type>::type, PlainObject >::type MatrixType; internal::compute_inverse_and_det_with_check::run (derived(), absDeterminantThreshold, inverse, determinant, invertible); } /** \lu_module * * Computation of matrix inverse, with invertibility check. * * This is only for fixed-size square matrices of size up to 4x4. * * \param inverse Reference to the matrix in which to store the inverse. * \param invertible Reference to the bool variable in which to store whether the matrix is invertible. * \param absDeterminantThreshold Optional parameter controlling the invertibility check. * The matrix will be declared invertible if the absolute value of its * determinant is greater than this threshold. * * Example: \include MatrixBase_computeInverseWithCheck.cpp * Output: \verbinclude MatrixBase_computeInverseWithCheck.out * * \sa inverse(), computeInverseAndDetWithCheck() */ template template inline void MatrixBase::computeInverseWithCheck( ResultType& inverse, bool& invertible, const RealScalar& absDeterminantThreshold ) const { RealScalar determinant; // i'd love to put some static assertions there, but SFINAE means that they have no effect... 
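  // Usage sketch (added for illustration; 'A' and 'Ainv' are placeholder names):
  //   Eigen::Matrix3d Ainv;
  //   bool ok;
  //   A.computeInverseWithCheck(Ainv, ok);  // uses the default absDeterminantThreshold
  //   if (ok) { /* Ainv now holds the inverse of A */ }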
eigen_assert(rows() == cols()); computeInverseAndDetWithCheck(inverse,determinant,invertible,absDeterminantThreshold); } } // end namespace Eigen #endif // EIGEN_INVERSE_H RcppEigen/inst/include/Eigen/src/LU/Determinant.h0000644000176200001440000000575412253717461021332 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_DETERMINANT_H #define EIGEN_DETERMINANT_H namespace Eigen { namespace internal { template inline const typename Derived::Scalar bruteforce_det3_helper (const MatrixBase& matrix, int a, int b, int c) { return matrix.coeff(0,a) * (matrix.coeff(1,b) * matrix.coeff(2,c) - matrix.coeff(1,c) * matrix.coeff(2,b)); } template const typename Derived::Scalar bruteforce_det4_helper (const MatrixBase& matrix, int j, int k, int m, int n) { return (matrix.coeff(j,0) * matrix.coeff(k,1) - matrix.coeff(k,0) * matrix.coeff(j,1)) * (matrix.coeff(m,2) * matrix.coeff(n,3) - matrix.coeff(n,2) * matrix.coeff(m,3)); } template struct determinant_impl { static inline typename traits::Scalar run(const Derived& m) { if(Derived::ColsAtCompileTime==Dynamic && m.rows()==0) return typename traits::Scalar(1); return m.partialPivLu().determinant(); } }; template struct determinant_impl { static inline typename traits::Scalar run(const Derived& m) { return m.coeff(0,0); } }; template struct determinant_impl { static inline typename traits::Scalar run(const Derived& m) { return m.coeff(0,0) * m.coeff(1,1) - m.coeff(1,0) * m.coeff(0,1); } }; template struct determinant_impl { static inline typename traits::Scalar run(const Derived& m) { return bruteforce_det3_helper(m,0,1,2) - bruteforce_det3_helper(m,1,0,2) + bruteforce_det3_helper(m,2,0,1); } }; template struct determinant_impl { static typename traits::Scalar run(const Derived& m) { // trick by Martin Costabel to compute 4x4 det with only 30 muls return bruteforce_det4_helper(m,0,1,2,3) - bruteforce_det4_helper(m,0,2,1,3) + bruteforce_det4_helper(m,0,3,1,2) + bruteforce_det4_helper(m,1,2,0,3) - bruteforce_det4_helper(m,1,3,0,2) + bruteforce_det4_helper(m,2,3,0,1); } }; } // end namespace internal /** \lu_module * * \returns the determinant of this matrix */ template inline typename internal::traits::Scalar MatrixBase::determinant() const { eigen_assert(rows() == cols()); typedef typename internal::nested::type Nested; return internal::determinant_impl::type>::run(derived()); } } // end namespace Eigen #endif // EIGEN_DETERMINANT_H RcppEigen/inst/include/Eigen/src/LU/PartialPivLU_MKL.h0000644000176200001440000000662712253717461022077 0ustar liggesusers/* Copyright (c) 2011, Intel Corporation. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
* Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************** * Content : Eigen bindings to Intel(R) MKL * LU decomposition with partial pivoting based on LAPACKE_?getrf function. ******************************************************************************** */ #ifndef EIGEN_PARTIALLU_LAPACK_H #define EIGEN_PARTIALLU_LAPACK_H #include "Eigen/src/Core/util/MKL_support.h" namespace Eigen { namespace internal { /** \internal Specialization for the data types supported by MKL */ #define EIGEN_MKL_LU_PARTPIV(EIGTYPE, MKLTYPE, MKLPREFIX) \ template \ struct partial_lu_impl \ { \ /* \internal performs the LU decomposition in-place of the matrix represented */ \ static lapack_int blocked_lu(lapack_int rows, lapack_int cols, EIGTYPE* lu_data, lapack_int luStride, lapack_int* row_transpositions, lapack_int& nb_transpositions, lapack_int maxBlockSize=256) \ { \ EIGEN_UNUSED_VARIABLE(maxBlockSize);\ lapack_int matrix_order, first_zero_pivot; \ lapack_int m, n, lda, *ipiv, info; \ EIGTYPE* a; \ /* Set up parameters for ?getrf */ \ matrix_order = StorageOrder==RowMajor ? LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \ lda = luStride; \ a = lu_data; \ ipiv = row_transpositions; \ m = rows; \ n = cols; \ nb_transpositions = 0; \ \ info = LAPACKE_##MKLPREFIX##getrf( matrix_order, m, n, (MKLTYPE*)a, lda, ipiv ); \ \ for(int i=0;i= 0); \ /* something should be done with nb_transpositions */ \ \ first_zero_pivot = info; \ return first_zero_pivot; \ } \ }; EIGEN_MKL_LU_PARTPIV(double, double, d) EIGEN_MKL_LU_PARTPIV(float, float, s) EIGEN_MKL_LU_PARTPIV(dcomplex, MKL_Complex16, z) EIGEN_MKL_LU_PARTPIV(scomplex, MKL_Complex8, c) } // end namespace internal } // end namespace Eigen #endif // EIGEN_PARTIALLU_LAPACK_H RcppEigen/inst/include/Eigen/src/LU/PartialPivLU.h0000644000176200001440000004343112550462273021364 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2009 Benoit Jacob // Copyright (C) 2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
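// Typical use of the PartialPivLU class defined in this file (an illustrative sketch;
// 'A' must be a square, invertible matrix and 'b' a compatible right-hand side):
//
//   Eigen::PartialPivLU<Eigen::MatrixXd> lu(A);
//   Eigen::VectorXd x = lu.solve(b);                  // solves A x = b
//   double det = lu.determinant();                    // cheap once the factorization exists
//   Eigen::MatrixXd Arec = lu.reconstructedMatrix();  // P^{-1} L U, for debugging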
#ifndef EIGEN_PARTIALLU_H #define EIGEN_PARTIALLU_H namespace Eigen { /** \ingroup LU_Module * * \class PartialPivLU * * \brief LU decomposition of a matrix with partial pivoting, and related features * * \param MatrixType the type of the matrix of which we are computing the LU decomposition * * This class represents a LU decomposition of a \b square \b invertible matrix, with partial pivoting: the matrix A * is decomposed as A = PLU where L is unit-lower-triangular, U is upper-triangular, and P * is a permutation matrix. * * Typically, partial pivoting LU decomposition is only considered numerically stable for square invertible * matrices. Thus LAPACK's dgesv and dgesvx require the matrix to be square and invertible. The present class * does the same. It will assert that the matrix is square, but it won't (actually it can't) check that the * matrix is invertible: it is your task to check that you only use this decomposition on invertible matrices. * * The guaranteed safe alternative, working for all matrices, is the full pivoting LU decomposition, provided * by class FullPivLU. * * This is \b not a rank-revealing LU decomposition. Many features are intentionally absent from this class, * such as rank computation. If you need these features, use class FullPivLU. * * This LU decomposition is suitable to invert invertible matrices. It is what MatrixBase::inverse() uses * in the general case. * On the other hand, it is \b not suitable to determine whether a given matrix is invertible. * * The data of the LU decomposition can be directly accessed through the methods matrixLU(), permutationP(). * * \sa MatrixBase::partialPivLu(), MatrixBase::determinant(), MatrixBase::inverse(), MatrixBase::computeInverse(), class FullPivLU */ template class PartialPivLU { public: typedef _MatrixType MatrixType; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, Options = MatrixType::Options, MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime }; typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef typename internal::traits::StorageKind StorageKind; typedef typename MatrixType::Index Index; typedef PermutationMatrix PermutationType; typedef Transpositions TranspositionType; /** * \brief Default Constructor. * * The default constructor is useful in cases in which the user intends to * perform decompositions via PartialPivLU::compute(const MatrixType&). */ PartialPivLU(); /** \brief Default Constructor with memory preallocation * * Like the default constructor but with preallocation of the internal data * according to the specified problem \a size. * \sa PartialPivLU() */ PartialPivLU(Index size); /** Constructor. * * \param matrix the matrix of which to compute the LU decomposition. * * \warning The matrix should have full rank (e.g. if it's square, it should be invertible). * If you need to deal with non-full rank, use class FullPivLU instead. */ PartialPivLU(const MatrixType& matrix); PartialPivLU& compute(const MatrixType& matrix); /** \returns the LU decomposition matrix: the upper-triangular part is U, the * unit-lower-triangular part is L (at least for square matrices; in the non-square * case, special care is needed, see the documentation of class FullPivLU). 
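    *
    * For instance, the two factors can be extracted as follows (a sketch; the names are
    * illustrative):
    * \code
    * Eigen::PartialPivLU<Eigen::Matrix3d> lu(A);
    * Eigen::Matrix3d U = lu.matrixLU().triangularView<Eigen::Upper>();
    * Eigen::Matrix3d L = Eigen::Matrix3d::Identity();
    * L.triangularView<Eigen::StrictlyLower>() = lu.matrixLU();
    * \endcode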
* * \sa matrixL(), matrixU() */ inline const MatrixType& matrixLU() const { eigen_assert(m_isInitialized && "PartialPivLU is not initialized."); return m_lu; } /** \returns the permutation matrix P. */ inline const PermutationType& permutationP() const { eigen_assert(m_isInitialized && "PartialPivLU is not initialized."); return m_p; } /** This method returns the solution x to the equation Ax=b, where A is the matrix of which * *this is the LU decomposition. * * \param b the right-hand-side of the equation to solve. Can be a vector or a matrix, * the only requirement in order for the equation to make sense is that * b.rows()==A.rows(), where A is the matrix of which *this is the LU decomposition. * * \returns the solution. * * Example: \include PartialPivLU_solve.cpp * Output: \verbinclude PartialPivLU_solve.out * * Since this PartialPivLU class assumes anyway that the matrix A is invertible, the solution * theoretically exists and is unique regardless of b. * * \sa TriangularView::solve(), inverse(), computeInverse() */ template inline const internal::solve_retval solve(const MatrixBase& b) const { eigen_assert(m_isInitialized && "PartialPivLU is not initialized."); return internal::solve_retval(*this, b.derived()); } /** \returns the inverse of the matrix of which *this is the LU decomposition. * * \warning The matrix being decomposed here is assumed to be invertible. If you need to check for * invertibility, use class FullPivLU instead. * * \sa MatrixBase::inverse(), LU::inverse() */ inline const internal::solve_retval inverse() const { eigen_assert(m_isInitialized && "PartialPivLU is not initialized."); return internal::solve_retval (*this, MatrixType::Identity(m_lu.rows(), m_lu.cols())); } /** \returns the determinant of the matrix of which * *this is the LU decomposition. It has only linear complexity * (that is, O(n) where n is the dimension of the square matrix) * as the LU decomposition has already been computed. * * \note For fixed-size matrices of size up to 4, MatrixBase::determinant() offers * optimized paths. * * \warning a determinant can be very big or small, so for matrices * of large enough dimension, there is a risk of overflow/underflow. 
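      *
      * Concretely (see the implementation further below), the returned value is the sign
      * of the row permutation times the product of the diagonal entries of matrixLU(),
      * i.e. \f$ \pm \prod_i U_{ii} \f$ with the sign given by the parity of the row
      * transpositions.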
* * \sa MatrixBase::determinant() */ typename internal::traits::Scalar determinant() const; MatrixType reconstructedMatrix() const; inline Index rows() const { return m_lu.rows(); } inline Index cols() const { return m_lu.cols(); } protected: static void check_template_parameters() { EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); } MatrixType m_lu; PermutationType m_p; TranspositionType m_rowsTranspositions; Index m_det_p; bool m_isInitialized; }; template PartialPivLU::PartialPivLU() : m_lu(), m_p(), m_rowsTranspositions(), m_det_p(0), m_isInitialized(false) { } template PartialPivLU::PartialPivLU(Index size) : m_lu(size, size), m_p(size), m_rowsTranspositions(size), m_det_p(0), m_isInitialized(false) { } template PartialPivLU::PartialPivLU(const MatrixType& matrix) : m_lu(matrix.rows(), matrix.rows()), m_p(matrix.rows()), m_rowsTranspositions(matrix.rows()), m_det_p(0), m_isInitialized(false) { compute(matrix); } namespace internal { /** \internal This is the blocked version of fullpivlu_unblocked() */ template struct partial_lu_impl { // FIXME add a stride to Map, so that the following mapping becomes easier, // another option would be to create an expression being able to automatically // warp any Map, Matrix, and Block expressions as a unique type, but since that's exactly // a Map + stride, why not adding a stride to Map, and convenient ctors from a Matrix, // and Block. typedef Map > MapLU; typedef Block MatrixType; typedef Block BlockType; typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::Index Index; /** \internal performs the LU decomposition in-place of the matrix \a lu * using an unblocked algorithm. * * In addition, this function returns the row transpositions in the * vector \a row_transpositions which must have a size equal to the number * of columns of the matrix \a lu, and an integer \a nb_transpositions * which returns the actual number of transpositions. * * \returns The index of the first pivot which is exactly zero if any, or a negative number otherwise. */ static Index unblocked_lu(MatrixType& lu, PivIndex* row_transpositions, PivIndex& nb_transpositions) { const Index rows = lu.rows(); const Index cols = lu.cols(); const Index size = (std::min)(rows,cols); nb_transpositions = 0; Index first_zero_pivot = -1; for(Index k = 0; k < size; ++k) { Index rrows = rows-k-1; Index rcols = cols-k-1; Index row_of_biggest_in_col; RealScalar biggest_in_corner = lu.col(k).tail(rows-k).cwiseAbs().maxCoeff(&row_of_biggest_in_col); row_of_biggest_in_col += k; row_transpositions[k] = PivIndex(row_of_biggest_in_col); if(biggest_in_corner != RealScalar(0)) { if(k != row_of_biggest_in_col) { lu.row(k).swap(lu.row(row_of_biggest_in_col)); ++nb_transpositions; } // FIXME shall we introduce a safe quotient expression in cas 1/lu.coeff(k,k) // overflow but not the actual quotient? 
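        // Divide the entries below the pivot by the pivot itself: these quotients are the
        // multipliers that form the strictly lower (L) part of column k.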
lu.col(k).tail(rrows) /= lu.coeff(k,k); } else if(first_zero_pivot==-1) { // the pivot is exactly zero, we record the index of the first pivot which is exactly 0, // and continue the factorization such we still have A = PLU first_zero_pivot = k; } if(k > > */ static Index blocked_lu(Index rows, Index cols, Scalar* lu_data, Index luStride, PivIndex* row_transpositions, PivIndex& nb_transpositions, Index maxBlockSize=256) { MapLU lu1(lu_data,StorageOrder==RowMajor?rows:luStride,StorageOrder==RowMajor?luStride:cols); MatrixType lu(lu1,0,0,rows,cols); const Index size = (std::min)(rows,cols); // if the matrix is too small, no blocking: if(size<=16) { return unblocked_lu(lu, row_transpositions, nb_transpositions); } // automatically adjust the number of subdivisions to the size // of the matrix so that there is enough sub blocks: Index blockSize; { blockSize = size/8; blockSize = (blockSize/16)*16; blockSize = (std::min)((std::max)(blockSize,Index(8)), maxBlockSize); } nb_transpositions = 0; Index first_zero_pivot = -1; for(Index k = 0; k < size; k+=blockSize) { Index bs = (std::min)(size-k,blockSize); // actual size of the block Index trows = rows - k - bs; // trailing rows Index tsize = size - k - bs; // trailing size // partition the matrix: // A00 | A01 | A02 // lu = A_0 | A_1 | A_2 = A10 | A11 | A12 // A20 | A21 | A22 BlockType A_0(lu,0,0,rows,k); BlockType A_2(lu,0,k+bs,rows,tsize); BlockType A11(lu,k,k,bs,bs); BlockType A12(lu,k,k+bs,bs,tsize); BlockType A21(lu,k+bs,k,trows,bs); BlockType A22(lu,k+bs,k+bs,trows,tsize); PivIndex nb_transpositions_in_panel; // recursively call the blocked LU algorithm on [A11^T A21^T]^T // with a very small blocking size: Index ret = blocked_lu(trows+bs, bs, &lu.coeffRef(k,k), luStride, row_transpositions+k, nb_transpositions_in_panel, 16); if(ret>=0 && first_zero_pivot==-1) first_zero_pivot = k+ret; nb_transpositions += nb_transpositions_in_panel; // update permutations and apply them to A_0 for(Index i=k; i().solveInPlace(A12); A22.noalias() -= A21 * A12; } } return first_zero_pivot; } }; /** \internal performs the LU decomposition with partial pivoting in-place. */ template void partial_lu_inplace(MatrixType& lu, TranspositionType& row_transpositions, typename TranspositionType::Index& nb_transpositions) { eigen_assert(lu.cols() == row_transpositions.size()); eigen_assert((&row_transpositions.coeffRef(1)-&row_transpositions.coeffRef(0)) == 1); partial_lu_impl ::blocked_lu(lu.rows(), lu.cols(), &lu.coeffRef(0,0), lu.outerStride(), &row_transpositions.coeffRef(0), nb_transpositions); } } // end namespace internal template PartialPivLU& PartialPivLU::compute(const MatrixType& matrix) { check_template_parameters(); // the row permutation is stored as int indices, so just to be sure: eigen_assert(matrix.rows()::highest()); m_lu = matrix; eigen_assert(matrix.rows() == matrix.cols() && "PartialPivLU is only for square (and moreover invertible) matrices"); const Index size = matrix.rows(); m_rowsTranspositions.resize(size); typename TranspositionType::Index nb_transpositions; internal::partial_lu_inplace(m_lu, m_rowsTranspositions, nb_transpositions); m_det_p = (nb_transpositions%2) ? -1 : 1; m_p = m_rowsTranspositions; m_isInitialized = true; return *this; } template typename internal::traits::Scalar PartialPivLU::determinant() const { eigen_assert(m_isInitialized && "PartialPivLU is not initialized."); return Scalar(m_det_p) * m_lu.diagonal().prod(); } /** \returns the matrix represented by the decomposition, * i.e., it returns the product: P^{-1} L U. 
* This function is provided for debug purpose. */ template MatrixType PartialPivLU::reconstructedMatrix() const { eigen_assert(m_isInitialized && "LU is not initialized."); // LU MatrixType res = m_lu.template triangularView().toDenseMatrix() * m_lu.template triangularView(); // P^{-1}(LU) res = m_p.inverse() * res; return res; } /***** Implementation of solve() *****************************************************/ namespace internal { template struct solve_retval, Rhs> : solve_retval_base, Rhs> { EIGEN_MAKE_SOLVE_HELPERS(PartialPivLU<_MatrixType>,Rhs) template void evalTo(Dest& dst) const { /* The decomposition PA = LU can be rewritten as A = P^{-1} L U. * So we proceed as follows: * Step 1: compute c = Pb. * Step 2: replace c by the solution x to Lx = c. * Step 3: replace c by the solution x to Ux = c. */ eigen_assert(rhs().rows() == dec().matrixLU().rows()); // Step 1 dst = dec().permutationP() * rhs(); // Step 2 dec().matrixLU().template triangularView().solveInPlace(dst); // Step 3 dec().matrixLU().template triangularView().solveInPlace(dst); } }; } // end namespace internal /******** MatrixBase methods *******/ /** \lu_module * * \return the partial-pivoting LU decomposition of \c *this. * * \sa class PartialPivLU */ template inline const PartialPivLU::PlainObject> MatrixBase::partialPivLu() const { return PartialPivLU(eval()); } #if EIGEN2_SUPPORT_STAGE > STAGE20_RESOLVE_API_CONFLICTS /** \lu_module * * Synonym of partialPivLu(). * * \return the partial-pivoting LU decomposition of \c *this. * * \sa class PartialPivLU */ template inline const PartialPivLU::PlainObject> MatrixBase::lu() const { return PartialPivLU(eval()); } #endif } // end namespace Eigen #endif // EIGEN_PARTIALLU_H RcppEigen/inst/include/Eigen/src/LU/FullPivLU.h0000644000176200001440000006722712550462273020703 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2009 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_LU_H #define EIGEN_LU_H namespace Eigen { /** \ingroup LU_Module * * \class FullPivLU * * \brief LU decomposition of a matrix with complete pivoting, and related features * * \param MatrixType the type of the matrix of which we are computing the LU decomposition * * This class represents a LU decomposition of any matrix, with complete pivoting: the matrix A is * decomposed as \f$ A = P^{-1} L U Q^{-1} \f$ where L is unit-lower-triangular, U is * upper-triangular, and P and Q are permutation matrices. This is a rank-revealing LU * decomposition. The eigenvalues (diagonal coefficients) of U are sorted in such a way that any * zeros are at the end. * * This decomposition provides the generic approach to solving systems of linear equations, computing * the rank, invertibility, inverse, kernel, and determinant. * * This LU decomposition is very stable and well tested with large matrices. However there are use cases where the SVD * decomposition is inherently more stable and/or flexible. For example, when computing the kernel of a matrix, * working with the SVD allows to select the smallest singular values of the matrix, something that * the LU decomposition doesn't see. * * The data of the LU decomposition can be directly accessed through the methods matrixLU(), * permutationP(), permutationQ(). 
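  *
  * A typical call sequence looks like this (a sketch; \c A and \c b are placeholders):
  * \code
  * Eigen::FullPivLU<Eigen::MatrixXd> lu(A);
  * Eigen::MatrixXd::Index r = lu.rank();      // rank-revealing
  * Eigen::MatrixXd      ker = lu.kernel();    // basis of the null space
  * Eigen::VectorXd        x = lu.solve(b);    // a solution of A x = b, if one exists
  * \endcode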
* * As an exemple, here is how the original matrix can be retrieved: * \include class_FullPivLU.cpp * Output: \verbinclude class_FullPivLU.out * * \sa MatrixBase::fullPivLu(), MatrixBase::determinant(), MatrixBase::inverse() */ template class FullPivLU { public: typedef _MatrixType MatrixType; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, Options = MatrixType::Options, MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime }; typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef typename internal::traits::StorageKind StorageKind; typedef typename MatrixType::Index Index; typedef typename internal::plain_row_type::type IntRowVectorType; typedef typename internal::plain_col_type::type IntColVectorType; typedef PermutationMatrix PermutationQType; typedef PermutationMatrix PermutationPType; /** * \brief Default Constructor. * * The default constructor is useful in cases in which the user intends to * perform decompositions via LU::compute(const MatrixType&). */ FullPivLU(); /** \brief Default Constructor with memory preallocation * * Like the default constructor but with preallocation of the internal data * according to the specified problem \a size. * \sa FullPivLU() */ FullPivLU(Index rows, Index cols); /** Constructor. * * \param matrix the matrix of which to compute the LU decomposition. * It is required to be nonzero. */ FullPivLU(const MatrixType& matrix); /** Computes the LU decomposition of the given matrix. * * \param matrix the matrix of which to compute the LU decomposition. * It is required to be nonzero. * * \returns a reference to *this */ FullPivLU& compute(const MatrixType& matrix); /** \returns the LU decomposition matrix: the upper-triangular part is U, the * unit-lower-triangular part is L (at least for square matrices; in the non-square * case, special care is needed, see the documentation of class FullPivLU). * * \sa matrixL(), matrixU() */ inline const MatrixType& matrixLU() const { eigen_assert(m_isInitialized && "LU is not initialized."); return m_lu; } /** \returns the number of nonzero pivots in the LU decomposition. * Here nonzero is meant in the exact sense, not in a fuzzy sense. * So that notion isn't really intrinsically interesting, but it is * still useful when implementing algorithms. * * \sa rank() */ inline Index nonzeroPivots() const { eigen_assert(m_isInitialized && "LU is not initialized."); return m_nonzero_pivots; } /** \returns the absolute value of the biggest pivot, i.e. the biggest * diagonal coefficient of U. */ RealScalar maxPivot() const { return m_maxpivot; } /** \returns the permutation matrix P * * \sa permutationQ() */ inline const PermutationPType& permutationP() const { eigen_assert(m_isInitialized && "LU is not initialized."); return m_p; } /** \returns the permutation matrix Q * * \sa permutationP() */ inline const PermutationQType& permutationQ() const { eigen_assert(m_isInitialized && "LU is not initialized."); return m_q; } /** \returns the kernel of the matrix, also called its null-space. The columns of the returned matrix * will form a basis of the kernel. * * \note If the kernel has dimension zero, then the returned matrix is a column-vector filled with zeros. * * \note This method has to determine which pivots should be considered nonzero. * For that, it uses the threshold value that you can control by calling * setThreshold(const RealScalar&). 
* * Example: \include FullPivLU_kernel.cpp * Output: \verbinclude FullPivLU_kernel.out * * \sa image() */ inline const internal::kernel_retval kernel() const { eigen_assert(m_isInitialized && "LU is not initialized."); return internal::kernel_retval(*this); } /** \returns the image of the matrix, also called its column-space. The columns of the returned matrix * will form a basis of the kernel. * * \param originalMatrix the original matrix, of which *this is the LU decomposition. * The reason why it is needed to pass it here, is that this allows * a large optimization, as otherwise this method would need to reconstruct it * from the LU decomposition. * * \note If the image has dimension zero, then the returned matrix is a column-vector filled with zeros. * * \note This method has to determine which pivots should be considered nonzero. * For that, it uses the threshold value that you can control by calling * setThreshold(const RealScalar&). * * Example: \include FullPivLU_image.cpp * Output: \verbinclude FullPivLU_image.out * * \sa kernel() */ inline const internal::image_retval image(const MatrixType& originalMatrix) const { eigen_assert(m_isInitialized && "LU is not initialized."); return internal::image_retval(*this, originalMatrix); } /** \return a solution x to the equation Ax=b, where A is the matrix of which * *this is the LU decomposition. * * \param b the right-hand-side of the equation to solve. Can be a vector or a matrix, * the only requirement in order for the equation to make sense is that * b.rows()==A.rows(), where A is the matrix of which *this is the LU decomposition. * * \returns a solution. * * \note_about_checking_solutions * * \note_about_arbitrary_choice_of_solution * \note_about_using_kernel_to_study_multiple_solutions * * Example: \include FullPivLU_solve.cpp * Output: \verbinclude FullPivLU_solve.out * * \sa TriangularView::solve(), kernel(), inverse() */ template inline const internal::solve_retval solve(const MatrixBase& b) const { eigen_assert(m_isInitialized && "LU is not initialized."); return internal::solve_retval(*this, b.derived()); } /** \returns the determinant of the matrix of which * *this is the LU decomposition. It has only linear complexity * (that is, O(n) where n is the dimension of the square matrix) * as the LU decomposition has already been computed. * * \note This is only for square matrices. * * \note For fixed-size matrices of size up to 4, MatrixBase::determinant() offers * optimized paths. * * \warning a determinant can be very big or small, so for matrices * of large enough dimension, there is a risk of overflow/underflow. * * \sa MatrixBase::determinant() */ typename internal::traits::Scalar determinant() const; /** Allows to prescribe a threshold to be used by certain methods, such as rank(), * who need to determine when pivots are to be considered nonzero. This is not used for the * LU decomposition itself. * * When it needs to get the threshold value, Eigen calls threshold(). By default, this * uses a formula to automatically determine a reasonable threshold. * Once you have called the present method setThreshold(const RealScalar&), * your value is used instead. * * \param threshold The new value to use as the threshold. * * A pivot will be considered nonzero if its absolute value is strictly greater than * \f$ \vert pivot \vert \leqslant threshold \times \vert maxpivot \vert \f$ * where maxpivot is the biggest pivot. 
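    * (In other words, the test actually performed by rank() and the related methods is
    * \f$ \vert pivot \vert > threshold \times \vert maxpivot \vert \f$.)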
* * If you want to come back to the default behavior, call setThreshold(Default_t) */ FullPivLU& setThreshold(const RealScalar& threshold) { m_usePrescribedThreshold = true; m_prescribedThreshold = threshold; return *this; } /** Allows to come back to the default behavior, letting Eigen use its default formula for * determining the threshold. * * You should pass the special object Eigen::Default as parameter here. * \code lu.setThreshold(Eigen::Default); \endcode * * See the documentation of setThreshold(const RealScalar&). */ FullPivLU& setThreshold(Default_t) { m_usePrescribedThreshold = false; return *this; } /** Returns the threshold that will be used by certain methods such as rank(). * * See the documentation of setThreshold(const RealScalar&). */ RealScalar threshold() const { eigen_assert(m_isInitialized || m_usePrescribedThreshold); return m_usePrescribedThreshold ? m_prescribedThreshold // this formula comes from experimenting (see "LU precision tuning" thread on the list) // and turns out to be identical to Higham's formula used already in LDLt. : NumTraits::epsilon() * m_lu.diagonalSize(); } /** \returns the rank of the matrix of which *this is the LU decomposition. * * \note This method has to determine which pivots should be considered nonzero. * For that, it uses the threshold value that you can control by calling * setThreshold(const RealScalar&). */ inline Index rank() const { using std::abs; eigen_assert(m_isInitialized && "LU is not initialized."); RealScalar premultiplied_threshold = abs(m_maxpivot) * threshold(); Index result = 0; for(Index i = 0; i < m_nonzero_pivots; ++i) result += (abs(m_lu.coeff(i,i)) > premultiplied_threshold); return result; } /** \returns the dimension of the kernel of the matrix of which *this is the LU decomposition. * * \note This method has to determine which pivots should be considered nonzero. * For that, it uses the threshold value that you can control by calling * setThreshold(const RealScalar&). */ inline Index dimensionOfKernel() const { eigen_assert(m_isInitialized && "LU is not initialized."); return cols() - rank(); } /** \returns true if the matrix of which *this is the LU decomposition represents an injective * linear map, i.e. has trivial kernel; false otherwise. * * \note This method has to determine which pivots should be considered nonzero. * For that, it uses the threshold value that you can control by calling * setThreshold(const RealScalar&). */ inline bool isInjective() const { eigen_assert(m_isInitialized && "LU is not initialized."); return rank() == cols(); } /** \returns true if the matrix of which *this is the LU decomposition represents a surjective * linear map; false otherwise. * * \note This method has to determine which pivots should be considered nonzero. * For that, it uses the threshold value that you can control by calling * setThreshold(const RealScalar&). */ inline bool isSurjective() const { eigen_assert(m_isInitialized && "LU is not initialized."); return rank() == rows(); } /** \returns true if the matrix of which *this is the LU decomposition is invertible. * * \note This method has to determine which pivots should be considered nonzero. * For that, it uses the threshold value that you can control by calling * setThreshold(const RealScalar&). */ inline bool isInvertible() const { eigen_assert(m_isInitialized && "LU is not initialized."); return isInjective() && (m_lu.rows() == m_lu.cols()); } /** \returns the inverse of the matrix of which *this is the LU decomposition. 
* * \note If this matrix is not invertible, the returned matrix has undefined coefficients. * Use isInvertible() to first determine whether this matrix is invertible. * * \sa MatrixBase::inverse() */ inline const internal::solve_retval inverse() const { eigen_assert(m_isInitialized && "LU is not initialized."); eigen_assert(m_lu.rows() == m_lu.cols() && "You can't take the inverse of a non-square matrix!"); return internal::solve_retval (*this, MatrixType::Identity(m_lu.rows(), m_lu.cols())); } MatrixType reconstructedMatrix() const; inline Index rows() const { return m_lu.rows(); } inline Index cols() const { return m_lu.cols(); } protected: static void check_template_parameters() { EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); } MatrixType m_lu; PermutationPType m_p; PermutationQType m_q; IntColVectorType m_rowsTranspositions; IntRowVectorType m_colsTranspositions; Index m_det_pq, m_nonzero_pivots; RealScalar m_maxpivot, m_prescribedThreshold; bool m_isInitialized, m_usePrescribedThreshold; }; template FullPivLU::FullPivLU() : m_isInitialized(false), m_usePrescribedThreshold(false) { } template FullPivLU::FullPivLU(Index rows, Index cols) : m_lu(rows, cols), m_p(rows), m_q(cols), m_rowsTranspositions(rows), m_colsTranspositions(cols), m_isInitialized(false), m_usePrescribedThreshold(false) { } template FullPivLU::FullPivLU(const MatrixType& matrix) : m_lu(matrix.rows(), matrix.cols()), m_p(matrix.rows()), m_q(matrix.cols()), m_rowsTranspositions(matrix.rows()), m_colsTranspositions(matrix.cols()), m_isInitialized(false), m_usePrescribedThreshold(false) { compute(matrix); } template FullPivLU& FullPivLU::compute(const MatrixType& matrix) { check_template_parameters(); // the permutations are stored as int indices, so just to be sure: eigen_assert(matrix.rows()<=NumTraits::highest() && matrix.cols()<=NumTraits::highest()); m_isInitialized = true; m_lu = matrix; const Index size = matrix.diagonalSize(); const Index rows = matrix.rows(); const Index cols = matrix.cols(); // will store the transpositions, before we accumulate them at the end. // can't accumulate on-the-fly because that will be done in reverse order for the rows. m_rowsTranspositions.resize(matrix.rows()); m_colsTranspositions.resize(matrix.cols()); Index number_of_transpositions = 0; // number of NONTRIVIAL transpositions, i.e. m_rowsTranspositions[i]!=i m_nonzero_pivots = size; // the generic case is that in which all pivots are nonzero (invertible case) m_maxpivot = RealScalar(0); for(Index k = 0; k < size; ++k) { // First, we need to find the pivot. // biggest coefficient in the remaining bottom-right corner (starting at row k, col k) Index row_of_biggest_in_corner, col_of_biggest_in_corner; RealScalar biggest_in_corner; biggest_in_corner = m_lu.bottomRightCorner(rows-k, cols-k) .cwiseAbs() .maxCoeff(&row_of_biggest_in_corner, &col_of_biggest_in_corner); row_of_biggest_in_corner += k; // correct the values! since they were computed in the corner, col_of_biggest_in_corner += k; // need to add k to them. if(biggest_in_corner==RealScalar(0)) { // before exiting, make sure to initialize the still uninitialized transpositions // in a sane state without destroying what we already have. m_nonzero_pivots = k; for(Index i = k; i < size; ++i) { m_rowsTranspositions.coeffRef(i) = i; m_colsTranspositions.coeffRef(i) = i; } break; } if(biggest_in_corner > m_maxpivot) m_maxpivot = biggest_in_corner; // Now that we've found the pivot, we need to apply the row/col swaps to // bring it to the location (k,k). 
m_rowsTranspositions.coeffRef(k) = row_of_biggest_in_corner; m_colsTranspositions.coeffRef(k) = col_of_biggest_in_corner; if(k != row_of_biggest_in_corner) { m_lu.row(k).swap(m_lu.row(row_of_biggest_in_corner)); ++number_of_transpositions; } if(k != col_of_biggest_in_corner) { m_lu.col(k).swap(m_lu.col(col_of_biggest_in_corner)); ++number_of_transpositions; } // Now that the pivot is at the right location, we update the remaining // bottom-right corner by Gaussian elimination. if(k= 0; --k) m_p.applyTranspositionOnTheRight(k, m_rowsTranspositions.coeff(k)); m_q.setIdentity(cols); for(Index k = 0; k < size; ++k) m_q.applyTranspositionOnTheRight(k, m_colsTranspositions.coeff(k)); m_det_pq = (number_of_transpositions%2) ? -1 : 1; return *this; } template typename internal::traits::Scalar FullPivLU::determinant() const { eigen_assert(m_isInitialized && "LU is not initialized."); eigen_assert(m_lu.rows() == m_lu.cols() && "You can't take the determinant of a non-square matrix!"); return Scalar(m_det_pq) * Scalar(m_lu.diagonal().prod()); } /** \returns the matrix represented by the decomposition, * i.e., it returns the product: \f$ P^{-1} L U Q^{-1} \f$. * This function is provided for debug purposes. */ template MatrixType FullPivLU::reconstructedMatrix() const { eigen_assert(m_isInitialized && "LU is not initialized."); const Index smalldim = (std::min)(m_lu.rows(), m_lu.cols()); // LU MatrixType res(m_lu.rows(),m_lu.cols()); // FIXME the .toDenseMatrix() should not be needed... res = m_lu.leftCols(smalldim) .template triangularView().toDenseMatrix() * m_lu.topRows(smalldim) .template triangularView().toDenseMatrix(); // P^{-1}(LU) res = m_p.inverse() * res; // (P^{-1}LU)Q^{-1} res = res * m_q.inverse(); return res; } /********* Implementation of kernel() **************************************************/ namespace internal { template struct kernel_retval > : kernel_retval_base > { EIGEN_MAKE_KERNEL_HELPERS(FullPivLU<_MatrixType>) enum { MaxSmallDimAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED( MatrixType::MaxColsAtCompileTime, MatrixType::MaxRowsAtCompileTime) }; template void evalTo(Dest& dst) const { using std::abs; const Index cols = dec().matrixLU().cols(), dimker = cols - rank(); if(dimker == 0) { // The Kernel is just {0}, so it doesn't have a basis properly speaking, but let's // avoid crashing/asserting as that depends on floating point calculations. Let's // just return a single column vector filled with zeros. dst.setZero(); return; } /* Let us use the following lemma: * * Lemma: If the matrix A has the LU decomposition PAQ = LU, * then Ker A = Q(Ker U). * * Proof: trivial: just keep in mind that P, Q, L are invertible. */ /* Thus, all we need to do is to compute Ker U, and then apply Q. * * U is upper triangular, with eigenvalues sorted so that any zeros appear at the end. * Thus, the diagonal of U ends with exactly * dimKer zero's. Let us use that to construct dimKer linearly * independent vectors in Ker U. */ Matrix pivots(rank()); RealScalar premultiplied_threshold = dec().maxPivot() * dec().threshold(); Index p = 0; for(Index i = 0; i < dec().nonzeroPivots(); ++i) if(abs(dec().matrixLU().coeff(i,i)) > premultiplied_threshold) pivots.coeffRef(p++) = i; eigen_internal_assert(p == rank()); // we construct a temporaty trapezoid matrix m, by taking the U matrix and // permuting the rows and cols to bring the nonnegligible pivots to the top of // the main diagonal. We need that to be able to apply our triangular solvers. 
// FIXME when we get triangularView-for-rectangular-matrices, this can be simplified Matrix m(dec().matrixLU().block(0, 0, rank(), cols)); for(Index i = 0; i < rank(); ++i) { if(i) m.row(i).head(i).setZero(); m.row(i).tail(cols-i) = dec().matrixLU().row(pivots.coeff(i)).tail(cols-i); } m.block(0, 0, rank(), rank()); m.block(0, 0, rank(), rank()).template triangularView().setZero(); for(Index i = 0; i < rank(); ++i) m.col(i).swap(m.col(pivots.coeff(i))); // ok, we have our trapezoid matrix, we can apply the triangular solver. // notice that the math behind this suggests that we should apply this to the // negative of the RHS, but for performance we just put the negative sign elsewhere, see below. m.topLeftCorner(rank(), rank()) .template triangularView().solveInPlace( m.topRightCorner(rank(), dimker) ); // now we must undo the column permutation that we had applied! for(Index i = rank()-1; i >= 0; --i) m.col(i).swap(m.col(pivots.coeff(i))); // see the negative sign in the next line, that's what we were talking about above. for(Index i = 0; i < rank(); ++i) dst.row(dec().permutationQ().indices().coeff(i)) = -m.row(i).tail(dimker); for(Index i = rank(); i < cols; ++i) dst.row(dec().permutationQ().indices().coeff(i)).setZero(); for(Index k = 0; k < dimker; ++k) dst.coeffRef(dec().permutationQ().indices().coeff(rank()+k), k) = Scalar(1); } }; /***** Implementation of image() *****************************************************/ template struct image_retval > : image_retval_base > { EIGEN_MAKE_IMAGE_HELPERS(FullPivLU<_MatrixType>) enum { MaxSmallDimAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED( MatrixType::MaxColsAtCompileTime, MatrixType::MaxRowsAtCompileTime) }; template void evalTo(Dest& dst) const { using std::abs; if(rank() == 0) { // The Image is just {0}, so it doesn't have a basis properly speaking, but let's // avoid crashing/asserting as that depends on floating point calculations. Let's // just return a single column vector filled with zeros. dst.setZero(); return; } Matrix pivots(rank()); RealScalar premultiplied_threshold = dec().maxPivot() * dec().threshold(); Index p = 0; for(Index i = 0; i < dec().nonzeroPivots(); ++i) if(abs(dec().matrixLU().coeff(i,i)) > premultiplied_threshold) pivots.coeffRef(p++) = i; eigen_internal_assert(p == rank()); for(Index i = 0; i < rank(); ++i) dst.col(i) = originalMatrix().col(dec().permutationQ().indices().coeff(pivots.coeff(i))); } }; /***** Implementation of solve() *****************************************************/ template struct solve_retval, Rhs> : solve_retval_base, Rhs> { EIGEN_MAKE_SOLVE_HELPERS(FullPivLU<_MatrixType>,Rhs) template void evalTo(Dest& dst) const { /* The decomposition PAQ = LU can be rewritten as A = P^{-1} L U Q^{-1}. * So we proceed as follows: * Step 1: compute c = P * rhs. * Step 2: replace c by the solution x to Lx = c. Exists because L is invertible. * Step 3: replace c by the solution x to Ux = c. May or may not exist. 
* Step 4: result = Q * c; */ const Index rows = dec().rows(), cols = dec().cols(), nonzero_pivots = dec().nonzeroPivots(); eigen_assert(rhs().rows() == rows); const Index smalldim = (std::min)(rows, cols); if(nonzero_pivots == 0) { dst.setZero(); return; } typename Rhs::PlainObject c(rhs().rows(), rhs().cols()); // Step 1 c = dec().permutationP() * rhs(); // Step 2 dec().matrixLU() .topLeftCorner(smalldim,smalldim) .template triangularView() .solveInPlace(c.topRows(smalldim)); if(rows>cols) { c.bottomRows(rows-cols) -= dec().matrixLU().bottomRows(rows-cols) * c.topRows(cols); } // Step 3 dec().matrixLU() .topLeftCorner(nonzero_pivots, nonzero_pivots) .template triangularView() .solveInPlace(c.topRows(nonzero_pivots)); // Step 4 for(Index i = 0; i < nonzero_pivots; ++i) dst.row(dec().permutationQ().indices().coeff(i)) = c.row(i); for(Index i = nonzero_pivots; i < dec().matrixLU().cols(); ++i) dst.row(dec().permutationQ().indices().coeff(i)).setZero(); } }; } // end namespace internal /******* MatrixBase methods *****************************************************************/ /** \lu_module * * \return the full-pivoting LU decomposition of \c *this. * * \sa class FullPivLU */ template inline const FullPivLU::PlainObject> MatrixBase::fullPivLu() const { return FullPivLU(eval()); } } // end namespace Eigen #endif // EIGEN_LU_H RcppEigen/inst/include/Eigen/src/PaStiXSupport/0000755000176200001440000000000012253717461021121 5ustar liggesusersRcppEigen/inst/include/Eigen/src/PaStiXSupport/PaStiXSupport.h0000644000176200001440000005545412253717461024054 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Désiré Nuentsa-Wakam // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_PASTIXSUPPORT_H #define EIGEN_PASTIXSUPPORT_H namespace Eigen { /** \ingroup PaStiXSupport_Module * \brief Interface to the PaStix solver * * This class is used to solve the linear systems A.X = B via the PaStix library. * The matrix can be either real or complex, symmetric or not. 
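  * A minimal usage sketch (illustrative only; it assumes PaStiX and its dependencies are
  * installed and linked, that <Eigen/PaStiXSupport> is included, and that \c A is an
  * already-filled square \c SparseMatrix<double> and \c b a dense \c VectorXd):
  * \code
  * Eigen::PastixLU<Eigen::SparseMatrix<double> > solver;
  * solver.compute(A);                     // ordering, symbolic and numerical factorization
  * if(solver.info() != Eigen::Success) {
  *   // the factorization failed; inspect solver.iparm() for details
  * }
  * Eigen::VectorXd x = solver.solve(b);   // solve A x = b
  * \endcode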
* * \sa TutorialSparseDirectSolvers */ template class PastixLU; template class PastixLLT; template class PastixLDLT; namespace internal { template struct pastix_traits; template struct pastix_traits< PastixLU<_MatrixType> > { typedef _MatrixType MatrixType; typedef typename _MatrixType::Scalar Scalar; typedef typename _MatrixType::RealScalar RealScalar; typedef typename _MatrixType::Index Index; }; template struct pastix_traits< PastixLLT<_MatrixType,Options> > { typedef _MatrixType MatrixType; typedef typename _MatrixType::Scalar Scalar; typedef typename _MatrixType::RealScalar RealScalar; typedef typename _MatrixType::Index Index; }; template struct pastix_traits< PastixLDLT<_MatrixType,Options> > { typedef _MatrixType MatrixType; typedef typename _MatrixType::Scalar Scalar; typedef typename _MatrixType::RealScalar RealScalar; typedef typename _MatrixType::Index Index; }; void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, float *vals, int *perm, int * invp, float *x, int nbrhs, int *iparm, double *dparm) { if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; } if (nbrhs == 0) {x = NULL; nbrhs=1;} s_pastix(pastix_data, pastix_comm, n, ptr, idx, vals, perm, invp, x, nbrhs, iparm, dparm); } void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, double *vals, int *perm, int * invp, double *x, int nbrhs, int *iparm, double *dparm) { if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; } if (nbrhs == 0) {x = NULL; nbrhs=1;} d_pastix(pastix_data, pastix_comm, n, ptr, idx, vals, perm, invp, x, nbrhs, iparm, dparm); } void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, std::complex *vals, int *perm, int * invp, std::complex *x, int nbrhs, int *iparm, double *dparm) { if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; } if (nbrhs == 0) {x = NULL; nbrhs=1;} c_pastix(pastix_data, pastix_comm, n, ptr, idx, reinterpret_cast(vals), perm, invp, reinterpret_cast(x), nbrhs, iparm, dparm); } void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, std::complex *vals, int *perm, int * invp, std::complex *x, int nbrhs, int *iparm, double *dparm) { if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; } if (nbrhs == 0) {x = NULL; nbrhs=1;} z_pastix(pastix_data, pastix_comm, n, ptr, idx, reinterpret_cast(vals), perm, invp, reinterpret_cast(x), nbrhs, iparm, dparm); } // Convert the matrix to Fortran-style Numbering template void c_to_fortran_numbering (MatrixType& mat) { if ( !(mat.outerIndexPtr()[0]) ) { int i; for(i = 0; i <= mat.rows(); ++i) ++mat.outerIndexPtr()[i]; for(i = 0; i < mat.nonZeros(); ++i) ++mat.innerIndexPtr()[i]; } } // Convert to C-style Numbering template void fortran_to_c_numbering (MatrixType& mat) { // Check the Numbering if ( mat.outerIndexPtr()[0] == 1 ) { // Convert to C-style numbering int i; for(i = 0; i <= mat.rows(); ++i) --mat.outerIndexPtr()[i]; for(i = 0; i < mat.nonZeros(); ++i) --mat.innerIndexPtr()[i]; } } } // This is the base class to interface with PaStiX functions. // Users should not used this class directly. 
template class PastixBase : internal::noncopyable { public: typedef typename internal::pastix_traits::MatrixType _MatrixType; typedef _MatrixType MatrixType; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::Index Index; typedef Matrix Vector; typedef SparseMatrix ColSpMatrix; public: PastixBase() : m_initisOk(false), m_analysisIsOk(false), m_factorizationIsOk(false), m_isInitialized(false), m_pastixdata(0), m_size(0) { init(); } ~PastixBase() { clean(); } /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. * * \sa compute() */ template inline const internal::solve_retval solve(const MatrixBase& b) const { eigen_assert(m_isInitialized && "Pastix solver is not initialized."); eigen_assert(rows()==b.rows() && "PastixBase::solve(): invalid number of rows of the right hand side matrix b"); return internal::solve_retval(*this, b.derived()); } template bool _solve (const MatrixBase &b, MatrixBase &x) const; Derived& derived() { return *static_cast(this); } const Derived& derived() const { return *static_cast(this); } /** Returns a reference to the integer vector IPARM of PaStiX parameters * to modify the default parameters. * The statistics related to the different phases of factorization and solve are saved here as well * \sa analyzePattern() factorize() */ Array& iparm() { return m_iparm; } /** Return a reference to a particular index parameter of the IPARM vector * \sa iparm() */ int& iparm(int idxparam) { return m_iparm(idxparam); } /** Returns a reference to the double vector DPARM of PaStiX parameters * The statistics related to the different phases of factorization and solve are saved here as well * \sa analyzePattern() factorize() */ Array& dparm() { return m_dparm; } /** Return a reference to a particular index parameter of the DPARM vector * \sa dparm() */ double& dparm(int idxparam) { return m_dparm(idxparam); } inline Index cols() const { return m_size; } inline Index rows() const { return m_size; } /** \brief Reports whether previous computation was successful. * * \returns \c Success if computation was succesful, * \c NumericalIssue if the PaStiX reports a problem * \c InvalidInput if the input matrix is invalid * * \sa iparm() */ ComputationInfo info() const { eigen_assert(m_isInitialized && "Decomposition is not initialized."); return m_info; } /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. 
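      * For instance, a sparse right-hand side can be passed directly (sketch; \c A and \c B
      * are assumed to be already-filled \c SparseMatrix<double> objects, with \c A symmetric
      * positive definite for the LDLT variant shown here):
      * \code
      * Eigen::PastixLDLT<Eigen::SparseMatrix<double>, Eigen::Lower> chol(A);
      * Eigen::SparseMatrix<double> X = chol.solve(B);   // solves A X = B column by column
      * \endcode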
* * \sa compute() */ template inline const internal::sparse_solve_retval solve(const SparseMatrixBase& b) const { eigen_assert(m_isInitialized && "Pastix LU, LLT or LDLT is not initialized."); eigen_assert(rows()==b.rows() && "PastixBase::solve(): invalid number of rows of the right hand side matrix b"); return internal::sparse_solve_retval(*this, b.derived()); } protected: // Initialize the Pastix data structure, check the matrix void init(); // Compute the ordering and the symbolic factorization void analyzePattern(ColSpMatrix& mat); // Compute the numerical factorization void factorize(ColSpMatrix& mat); // Free all the data allocated by Pastix void clean() { eigen_assert(m_initisOk && "The Pastix structure should be allocated first"); m_iparm(IPARM_START_TASK) = API_TASK_CLEAN; m_iparm(IPARM_END_TASK) = API_TASK_CLEAN; internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, 0, 0, 0, (Scalar*)0, m_perm.data(), m_invp.data(), 0, 0, m_iparm.data(), m_dparm.data()); } void compute(ColSpMatrix& mat); int m_initisOk; int m_analysisIsOk; int m_factorizationIsOk; bool m_isInitialized; mutable ComputationInfo m_info; mutable pastix_data_t *m_pastixdata; // Data structure for pastix mutable int m_comm; // The MPI communicator identifier mutable Matrix m_iparm; // integer vector for the input parameters mutable Matrix m_dparm; // Scalar vector for the input parameters mutable Matrix m_perm; // Permutation vector mutable Matrix m_invp; // Inverse permutation vector mutable int m_size; // Size of the matrix }; /** Initialize the PaStiX data structure. *A first call to this function fills iparm and dparm with the default PaStiX parameters * \sa iparm() dparm() */ template void PastixBase::init() { m_size = 0; m_iparm.setZero(IPARM_SIZE); m_dparm.setZero(DPARM_SIZE); m_iparm(IPARM_MODIFY_PARAMETER) = API_NO; pastix(&m_pastixdata, MPI_COMM_WORLD, 0, 0, 0, 0, 0, 0, 0, 1, m_iparm.data(), m_dparm.data()); m_iparm[IPARM_MATRIX_VERIFICATION] = API_NO; m_iparm[IPARM_VERBOSE] = 2; m_iparm[IPARM_ORDERING] = API_ORDER_SCOTCH; m_iparm[IPARM_INCOMPLETE] = API_NO; m_iparm[IPARM_OOC_LIMIT] = 2000; m_iparm[IPARM_RHS_MAKING] = API_RHS_B; m_iparm(IPARM_MATRIX_VERIFICATION) = API_NO; m_iparm(IPARM_START_TASK) = API_TASK_INIT; m_iparm(IPARM_END_TASK) = API_TASK_INIT; internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, 0, 0, 0, (Scalar*)0, 0, 0, 0, 0, m_iparm.data(), m_dparm.data()); // Check the returned error if(m_iparm(IPARM_ERROR_NUMBER)) { m_info = InvalidInput; m_initisOk = false; } else { m_info = Success; m_initisOk = true; } } template void PastixBase::compute(ColSpMatrix& mat) { eigen_assert(mat.rows() == mat.cols() && "The input matrix should be squared"); analyzePattern(mat); factorize(mat); m_iparm(IPARM_MATRIX_VERIFICATION) = API_NO; m_isInitialized = m_factorizationIsOk; } template void PastixBase::analyzePattern(ColSpMatrix& mat) { eigen_assert(m_initisOk && "The initialization of PaSTiX failed"); // clean previous calls if(m_size>0) clean(); m_size = mat.rows(); m_perm.resize(m_size); m_invp.resize(m_size); m_iparm(IPARM_START_TASK) = API_TASK_ORDERING; m_iparm(IPARM_END_TASK) = API_TASK_ANALYSE; internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, m_size, mat.outerIndexPtr(), mat.innerIndexPtr(), mat.valuePtr(), m_perm.data(), m_invp.data(), 0, 0, m_iparm.data(), m_dparm.data()); // Check the returned error if(m_iparm(IPARM_ERROR_NUMBER)) { m_info = NumericalIssue; m_analysisIsOk = false; } else { m_info = Success; m_analysisIsOk = true; } } template void PastixBase::factorize(ColSpMatrix& mat) { // 
if(&m_cpyMat != &mat) m_cpyMat = mat; eigen_assert(m_analysisIsOk && "The analysis phase should be called before the factorization phase"); m_iparm(IPARM_START_TASK) = API_TASK_NUMFACT; m_iparm(IPARM_END_TASK) = API_TASK_NUMFACT; m_size = mat.rows(); internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, m_size, mat.outerIndexPtr(), mat.innerIndexPtr(), mat.valuePtr(), m_perm.data(), m_invp.data(), 0, 0, m_iparm.data(), m_dparm.data()); // Check the returned error if(m_iparm(IPARM_ERROR_NUMBER)) { m_info = NumericalIssue; m_factorizationIsOk = false; m_isInitialized = false; } else { m_info = Success; m_factorizationIsOk = true; m_isInitialized = true; } } /* Solve the system */ template template bool PastixBase::_solve (const MatrixBase &b, MatrixBase &x) const { eigen_assert(m_isInitialized && "The matrix should be factorized first"); EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0, THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); int rhs = 1; x = b; /* on return, x is overwritten by the computed solution */ for (int i = 0; i < b.cols(); i++){ m_iparm[IPARM_START_TASK] = API_TASK_SOLVE; m_iparm[IPARM_END_TASK] = API_TASK_REFINE; internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, x.rows(), 0, 0, 0, m_perm.data(), m_invp.data(), &x(0, i), rhs, m_iparm.data(), m_dparm.data()); } // Check the returned error m_info = m_iparm(IPARM_ERROR_NUMBER)==0 ? Success : NumericalIssue; return m_iparm(IPARM_ERROR_NUMBER)==0; } /** \ingroup PaStiXSupport_Module * \class PastixLU * \brief Sparse direct LU solver based on PaStiX library * * This class is used to solve the linear systems A.X = B with a supernodal LU * factorization in the PaStiX library. The matrix A should be squared and nonsingular * PaStiX requires that the matrix A has a symmetric structural pattern. * This interface can symmetrize the input matrix otherwise. * The vectors or matrices X and B can be either dense or sparse. * * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> * \tparam IsStrSym Indicates if the input matrix has a symmetric pattern, default is false * NOTE : Note that if the analysis and factorization phase are called separately, * the input matrix will be symmetrized at each call, hence it is advised to * symmetrize the matrix in a end-user program and set \p IsStrSym to true * * \sa \ref TutorialSparseDirectSolvers * */ template class PastixLU : public PastixBase< PastixLU<_MatrixType> > { public: typedef _MatrixType MatrixType; typedef PastixBase > Base; typedef typename Base::ColSpMatrix ColSpMatrix; typedef typename MatrixType::Index Index; public: PastixLU() : Base() { init(); } PastixLU(const MatrixType& matrix):Base() { init(); compute(matrix); } /** Compute the LU supernodal factorization of \p matrix. * iparm and dparm can be used to tune the PaStiX parameters. * see the PaStiX user's manual * \sa analyzePattern() factorize() */ void compute (const MatrixType& matrix) { m_structureIsUptodate = false; ColSpMatrix temp; grabMatrix(matrix, temp); Base::compute(temp); } /** Compute the LU symbolic factorization of \p matrix using its sparsity pattern. * Several ordering methods can be used at this step. See the PaStiX user's manual. 
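      * A typical pattern-reuse sketch (illustrative only; \c A1 and \c A2 are square
      * \c SparseMatrix<double> objects sharing the same sparsity pattern, \c b1 and \c b2
      * dense right-hand sides):
      * \code
      * Eigen::PastixLU<Eigen::SparseMatrix<double> > lu;
      * lu.analyzePattern(A1);                 // symbolic factorization, done once
      * lu.factorize(A1);                      // numerical factorization of A1
      * Eigen::VectorXd x1 = lu.solve(b1);
      * lu.factorize(A2);                      // reuses the symbolic step for A2
      * Eigen::VectorXd x2 = lu.solve(b2);
      * \endcode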
* The result of this operation can be used with successive matrices having the same pattern as \p matrix * \sa factorize() */ void analyzePattern(const MatrixType& matrix) { m_structureIsUptodate = false; ColSpMatrix temp; grabMatrix(matrix, temp); Base::analyzePattern(temp); } /** Compute the LU supernodal factorization of \p matrix * WARNING The matrix \p matrix should have the same structural pattern * as the same used in the analysis phase. * \sa analyzePattern() */ void factorize(const MatrixType& matrix) { ColSpMatrix temp; grabMatrix(matrix, temp); Base::factorize(temp); } protected: void init() { m_structureIsUptodate = false; m_iparm(IPARM_SYM) = API_SYM_NO; m_iparm(IPARM_FACTORIZATION) = API_FACT_LU; } void grabMatrix(const MatrixType& matrix, ColSpMatrix& out) { if(IsStrSym) out = matrix; else { if(!m_structureIsUptodate) { // update the transposed structure m_transposedStructure = matrix.transpose(); // Set the elements of the matrix to zero for (Index j=0; j * \tparam UpLo The part of the matrix to use : Lower or Upper. The default is Lower as required by PaStiX * * \sa \ref TutorialSparseDirectSolvers */ template class PastixLLT : public PastixBase< PastixLLT<_MatrixType, _UpLo> > { public: typedef _MatrixType MatrixType; typedef PastixBase > Base; typedef typename Base::ColSpMatrix ColSpMatrix; public: enum { UpLo = _UpLo }; PastixLLT() : Base() { init(); } PastixLLT(const MatrixType& matrix):Base() { init(); compute(matrix); } /** Compute the L factor of the LL^T supernodal factorization of \p matrix * \sa analyzePattern() factorize() */ void compute (const MatrixType& matrix) { ColSpMatrix temp; grabMatrix(matrix, temp); Base::compute(temp); } /** Compute the LL^T symbolic factorization of \p matrix using its sparsity pattern * The result of this operation can be used with successive matrices having the same pattern as \p matrix * \sa factorize() */ void analyzePattern(const MatrixType& matrix) { ColSpMatrix temp; grabMatrix(matrix, temp); Base::analyzePattern(temp); } /** Compute the LL^T supernodal numerical factorization of \p matrix * \sa analyzePattern() */ void factorize(const MatrixType& matrix) { ColSpMatrix temp; grabMatrix(matrix, temp); Base::factorize(temp); } protected: using Base::m_iparm; void init() { m_iparm(IPARM_SYM) = API_SYM_YES; m_iparm(IPARM_FACTORIZATION) = API_FACT_LLT; } void grabMatrix(const MatrixType& matrix, ColSpMatrix& out) { // Pastix supports only lower, column-major matrices out.template selfadjointView() = matrix.template selfadjointView(); internal::c_to_fortran_numbering(out); } }; /** \ingroup PaStiXSupport_Module * \class PastixLDLT * \brief A sparse direct supernodal Cholesky (LLT) factorization and solver based on the PaStiX library * * This class is used to solve the linear systems A.X = B via a LDL^T supernodal Cholesky factorization * available in the PaStiX library. The matrix A should be symmetric and positive definite * WARNING Selfadjoint complex matrices are not supported in the current version of PaStiX * The vectors or matrices X and B can be either dense or sparse * * \tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> * \tparam UpLo The part of the matrix to use : Lower or Upper. 
The default is Lower as required by PaStiX * * \sa \ref TutorialSparseDirectSolvers */ template class PastixLDLT : public PastixBase< PastixLDLT<_MatrixType, _UpLo> > { public: typedef _MatrixType MatrixType; typedef PastixBase > Base; typedef typename Base::ColSpMatrix ColSpMatrix; public: enum { UpLo = _UpLo }; PastixLDLT():Base() { init(); } PastixLDLT(const MatrixType& matrix):Base() { init(); compute(matrix); } /** Compute the L and D factors of the LDL^T factorization of \p matrix * \sa analyzePattern() factorize() */ void compute (const MatrixType& matrix) { ColSpMatrix temp; grabMatrix(matrix, temp); Base::compute(temp); } /** Compute the LDL^T symbolic factorization of \p matrix using its sparsity pattern * The result of this operation can be used with successive matrices having the same pattern as \p matrix * \sa factorize() */ void analyzePattern(const MatrixType& matrix) { ColSpMatrix temp; grabMatrix(matrix, temp); Base::analyzePattern(temp); } /** Compute the LDL^T supernodal numerical factorization of \p matrix * */ void factorize(const MatrixType& matrix) { ColSpMatrix temp; grabMatrix(matrix, temp); Base::factorize(temp); } protected: using Base::m_iparm; void init() { m_iparm(IPARM_SYM) = API_SYM_YES; m_iparm(IPARM_FACTORIZATION) = API_FACT_LDLT; } void grabMatrix(const MatrixType& matrix, ColSpMatrix& out) { // Pastix supports only lower, column-major matrices out.template selfadjointView() = matrix.template selfadjointView(); internal::c_to_fortran_numbering(out); } }; namespace internal { template struct solve_retval, Rhs> : solve_retval_base, Rhs> { typedef PastixBase<_MatrixType> Dec; EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) template void evalTo(Dest& dst) const { dec()._solve(rhs(),dst); } }; template struct sparse_solve_retval, Rhs> : sparse_solve_retval_base, Rhs> { typedef PastixBase<_MatrixType> Dec; EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs) template void evalTo(Dest& dst) const { this->defaultEvalTo(dst); } }; } // end namespace internal } // end namespace Eigen #endif RcppEigen/inst/include/Eigen/src/plugins/0000755000176200001440000000000012550462273020033 5ustar liggesusersRcppEigen/inst/include/Eigen/src/plugins/MatrixCwiseBinaryOps.h0000644000176200001440000001377712550462273024311 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud // Copyright (C) 2006-2008 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. // This file is a base class plugin containing matrix specifics coefficient wise functions. /** \returns an expression of the Schur product (coefficient wise product) of *this and \a other * * Example: \include MatrixBase_cwiseProduct.cpp * Output: \verbinclude MatrixBase_cwiseProduct.out * * \sa class CwiseBinaryOp, cwiseAbs2 */ template EIGEN_STRONG_INLINE const EIGEN_CWISE_PRODUCT_RETURN_TYPE(Derived,OtherDerived) cwiseProduct(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const { return EIGEN_CWISE_PRODUCT_RETURN_TYPE(Derived,OtherDerived)(derived(), other.derived()); } /** \returns an expression of the coefficient-wise == operator of *this and \a other * * \warning this performs an exact comparison, which is generally a bad idea with floating-point types. 
* In order to check for equality between two vectors or matrices with floating-point coefficients, it is * generally a far better idea to use a fuzzy comparison as provided by isApprox() and * isMuchSmallerThan(). * * Example: \include MatrixBase_cwiseEqual.cpp * Output: \verbinclude MatrixBase_cwiseEqual.out * * \sa cwiseNotEqual(), isApprox(), isMuchSmallerThan() */ template inline const CwiseBinaryOp, const Derived, const OtherDerived> cwiseEqual(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const { return CwiseBinaryOp, const Derived, const OtherDerived>(derived(), other.derived()); } /** \returns an expression of the coefficient-wise != operator of *this and \a other * * \warning this performs an exact comparison, which is generally a bad idea with floating-point types. * In order to check for equality between two vectors or matrices with floating-point coefficients, it is * generally a far better idea to use a fuzzy comparison as provided by isApprox() and * isMuchSmallerThan(). * * Example: \include MatrixBase_cwiseNotEqual.cpp * Output: \verbinclude MatrixBase_cwiseNotEqual.out * * \sa cwiseEqual(), isApprox(), isMuchSmallerThan() */ template inline const CwiseBinaryOp, const Derived, const OtherDerived> cwiseNotEqual(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const { return CwiseBinaryOp, const Derived, const OtherDerived>(derived(), other.derived()); } /** \returns an expression of the coefficient-wise min of *this and \a other * * Example: \include MatrixBase_cwiseMin.cpp * Output: \verbinclude MatrixBase_cwiseMin.out * * \sa class CwiseBinaryOp, max() */ template EIGEN_STRONG_INLINE const CwiseBinaryOp, const Derived, const OtherDerived> cwiseMin(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const { return CwiseBinaryOp, const Derived, const OtherDerived>(derived(), other.derived()); } /** \returns an expression of the coefficient-wise min of *this and scalar \a other * * \sa class CwiseBinaryOp, min() */ EIGEN_STRONG_INLINE const CwiseBinaryOp, const Derived, const ConstantReturnType> cwiseMin(const Scalar &other) const { return cwiseMin(Derived::Constant(rows(), cols(), other)); } /** \returns an expression of the coefficient-wise max of *this and \a other * * Example: \include MatrixBase_cwiseMax.cpp * Output: \verbinclude MatrixBase_cwiseMax.out * * \sa class CwiseBinaryOp, min() */ template EIGEN_STRONG_INLINE const CwiseBinaryOp, const Derived, const OtherDerived> cwiseMax(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const { return CwiseBinaryOp, const Derived, const OtherDerived>(derived(), other.derived()); } /** \returns an expression of the coefficient-wise max of *this and scalar \a other * * \sa class CwiseBinaryOp, min() */ EIGEN_STRONG_INLINE const CwiseBinaryOp, const Derived, const ConstantReturnType> cwiseMax(const Scalar &other) const { return cwiseMax(Derived::Constant(rows(), cols(), other)); } /** \returns an expression of the coefficient-wise quotient of *this and \a other * * Example: \include MatrixBase_cwiseQuotient.cpp * Output: \verbinclude MatrixBase_cwiseQuotient.out * * \sa class CwiseBinaryOp, cwiseProduct(), cwiseInverse() */ template EIGEN_STRONG_INLINE const CwiseBinaryOp, const Derived, const OtherDerived> cwiseQuotient(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const { return CwiseBinaryOp, const Derived, const OtherDerived>(derived(), other.derived()); } typedef CwiseBinaryOp, const Derived, const ConstantReturnType> CwiseScalarEqualReturnType; /** \returns an expression of the coefficient-wise == operator of \c *this and a 
scalar \a s * * \warning this performs an exact comparison, which is generally a bad idea with floating-point types. * In order to check for equality between two vectors or matrices with floating-point coefficients, it is * generally a far better idea to use a fuzzy comparison as provided by isApprox() and * isMuchSmallerThan(). * * \sa cwiseEqual(const MatrixBase &) const */ inline const CwiseScalarEqualReturnType cwiseEqual(const Scalar& s) const { return CwiseScalarEqualReturnType(derived(), Derived::Constant(rows(), cols(), s), internal::scalar_cmp_op()); } RcppEigen/inst/include/Eigen/src/plugins/ArrayCwiseBinaryOps.h0000644000176200001440000002267412550462273024117 0ustar liggesusers/** \returns an expression of the coefficient wise product of \c *this and \a other * * \sa MatrixBase::cwiseProduct */ template EIGEN_STRONG_INLINE const EIGEN_CWISE_PRODUCT_RETURN_TYPE(Derived,OtherDerived) operator*(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const { return EIGEN_CWISE_PRODUCT_RETURN_TYPE(Derived,OtherDerived)(derived(), other.derived()); } /** \returns an expression of the coefficient wise quotient of \c *this and \a other * * \sa MatrixBase::cwiseQuotient */ template EIGEN_STRONG_INLINE const CwiseBinaryOp, const Derived, const OtherDerived> operator/(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const { return CwiseBinaryOp, const Derived, const OtherDerived>(derived(), other.derived()); } /** \returns an expression of the coefficient-wise min of \c *this and \a other * * Example: \include Cwise_min.cpp * Output: \verbinclude Cwise_min.out * * \sa max() */ EIGEN_MAKE_CWISE_BINARY_OP(min,internal::scalar_min_op) /** \returns an expression of the coefficient-wise min of \c *this and scalar \a other * * \sa max() */ EIGEN_STRONG_INLINE const CwiseBinaryOp, const Derived, const CwiseNullaryOp, PlainObject> > #ifdef EIGEN_PARSED_BY_DOXYGEN min #else (min) #endif (const Scalar &other) const { return (min)(Derived::PlainObject::Constant(rows(), cols(), other)); } /** \returns an expression of the coefficient-wise max of \c *this and \a other * * Example: \include Cwise_max.cpp * Output: \verbinclude Cwise_max.out * * \sa min() */ EIGEN_MAKE_CWISE_BINARY_OP(max,internal::scalar_max_op) /** \returns an expression of the coefficient-wise max of \c *this and scalar \a other * * \sa min() */ EIGEN_STRONG_INLINE const CwiseBinaryOp, const Derived, const CwiseNullaryOp, PlainObject> > #ifdef EIGEN_PARSED_BY_DOXYGEN max #else (max) #endif (const Scalar &other) const { return (max)(Derived::PlainObject::Constant(rows(), cols(), other)); } #define EIGEN_MAKE_CWISE_COMP_OP(OP, COMPARATOR) \ template \ EIGEN_STRONG_INLINE const CwiseBinaryOp, const Derived, const OtherDerived> \ OP(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const \ { \ return CwiseBinaryOp, const Derived, const OtherDerived>(derived(), other.derived()); \ }\ typedef CwiseBinaryOp, const Derived, const CwiseNullaryOp, PlainObject> > Cmp ## COMPARATOR ## ReturnType; \ typedef CwiseBinaryOp, const CwiseNullaryOp, PlainObject>, const Derived > RCmp ## COMPARATOR ## ReturnType; \ EIGEN_STRONG_INLINE const Cmp ## COMPARATOR ## ReturnType \ OP(const Scalar& s) const { \ return this->OP(Derived::PlainObject::Constant(rows(), cols(), s)); \ } \ friend EIGEN_STRONG_INLINE const RCmp ## COMPARATOR ## ReturnType \ OP(const Scalar& s, const Derived& d) { \ return Derived::PlainObject::Constant(d.rows(), d.cols(), s).OP(d); \ } #define EIGEN_MAKE_CWISE_COMP_R_OP(OP, R_OP, RCOMPARATOR) \ template \ EIGEN_STRONG_INLINE const CwiseBinaryOp, 
const OtherDerived, const Derived> \ OP(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const \ { \ return CwiseBinaryOp, const OtherDerived, const Derived>(other.derived(), derived()); \ } \ \ inline const RCmp ## RCOMPARATOR ## ReturnType \ OP(const Scalar& s) const { \ return Derived::PlainObject::Constant(rows(), cols(), s).R_OP(*this); \ } \ friend inline const Cmp ## RCOMPARATOR ## ReturnType \ OP(const Scalar& s, const Derived& d) { \ return d.R_OP(Derived::PlainObject::Constant(d.rows(), d.cols(), s)); \ } /** \returns an expression of the coefficient-wise \< operator of *this and \a other * * Example: \include Cwise_less.cpp * Output: \verbinclude Cwise_less.out * * \sa all(), any(), operator>(), operator<=() */ EIGEN_MAKE_CWISE_COMP_OP(operator<, LT) /** \returns an expression of the coefficient-wise \<= operator of *this and \a other * * Example: \include Cwise_less_equal.cpp * Output: \verbinclude Cwise_less_equal.out * * \sa all(), any(), operator>=(), operator<() */ EIGEN_MAKE_CWISE_COMP_OP(operator<=, LE) /** \returns an expression of the coefficient-wise \> operator of *this and \a other * * Example: \include Cwise_greater.cpp * Output: \verbinclude Cwise_greater.out * * \sa all(), any(), operator>=(), operator<() */ EIGEN_MAKE_CWISE_COMP_R_OP(operator>, operator<, LT) /** \returns an expression of the coefficient-wise \>= operator of *this and \a other * * Example: \include Cwise_greater_equal.cpp * Output: \verbinclude Cwise_greater_equal.out * * \sa all(), any(), operator>(), operator<=() */ EIGEN_MAKE_CWISE_COMP_R_OP(operator>=, operator<=, LE) /** \returns an expression of the coefficient-wise == operator of *this and \a other * * \warning this performs an exact comparison, which is generally a bad idea with floating-point types. * In order to check for equality between two vectors or matrices with floating-point coefficients, it is * generally a far better idea to use a fuzzy comparison as provided by isApprox() and * isMuchSmallerThan(). * * Example: \include Cwise_equal_equal.cpp * Output: \verbinclude Cwise_equal_equal.out * * \sa all(), any(), isApprox(), isMuchSmallerThan() */ EIGEN_MAKE_CWISE_COMP_OP(operator==, EQ) /** \returns an expression of the coefficient-wise != operator of *this and \a other * * \warning this performs an exact comparison, which is generally a bad idea with floating-point types. * In order to check for equality between two vectors or matrices with floating-point coefficients, it is * generally a far better idea to use a fuzzy comparison as provided by isApprox() and * isMuchSmallerThan(). 
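  * For instance (sketch, with \c a and \c b two \c ArrayXd of the same size), rather than
  * testing \c (a!=b).any() exactly, a tolerance-based test is usually what is wanted:
  * \code
  * bool differ = !a.isApprox(b, 1e-12);   // fuzzy comparison with an explicit precision
  * \endcode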
* * Example: \include Cwise_not_equal.cpp * Output: \verbinclude Cwise_not_equal.out * * \sa all(), any(), isApprox(), isMuchSmallerThan() */ EIGEN_MAKE_CWISE_COMP_OP(operator!=, NEQ) #undef EIGEN_MAKE_CWISE_COMP_OP #undef EIGEN_MAKE_CWISE_COMP_R_OP // scalar addition /** \returns an expression of \c *this with each coeff incremented by the constant \a scalar * * Example: \include Cwise_plus.cpp * Output: \verbinclude Cwise_plus.out * * \sa operator+=(), operator-() */ inline const CwiseUnaryOp, const Derived> operator+(const Scalar& scalar) const { return CwiseUnaryOp, const Derived>(derived(), internal::scalar_add_op(scalar)); } friend inline const CwiseUnaryOp, const Derived> operator+(const Scalar& scalar,const EIGEN_CURRENT_STORAGE_BASE_CLASS& other) { return other + scalar; } /** \returns an expression of \c *this with each coeff decremented by the constant \a scalar * * Example: \include Cwise_minus.cpp * Output: \verbinclude Cwise_minus.out * * \sa operator+(), operator-=() */ inline const CwiseUnaryOp, const Derived> operator-(const Scalar& scalar) const { return *this + (-scalar); } friend inline const CwiseUnaryOp, const CwiseUnaryOp, const Derived> > operator-(const Scalar& scalar,const EIGEN_CURRENT_STORAGE_BASE_CLASS& other) { return (-other) + scalar; } /** \returns an expression of the coefficient-wise && operator of *this and \a other * * \warning this operator is for expression of bool only. * * Example: \include Cwise_boolean_and.cpp * Output: \verbinclude Cwise_boolean_and.out * * \sa operator||(), select() */ template inline const CwiseBinaryOp operator&&(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const { EIGEN_STATIC_ASSERT((internal::is_same::value && internal::is_same::value), THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL); return CwiseBinaryOp(derived(),other.derived()); } /** \returns an expression of the coefficient-wise || operator of *this and \a other * * \warning this operator is for expression of bool only. * * Example: \include Cwise_boolean_or.cpp * Output: \verbinclude Cwise_boolean_or.out * * \sa operator&&(), select() */ template inline const CwiseBinaryOp operator||(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const { EIGEN_STATIC_ASSERT((internal::is_same::value && internal::is_same::value), THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL); return CwiseBinaryOp(derived(),other.derived()); } RcppEigen/inst/include/Eigen/src/plugins/CommonCwiseUnaryOps.h0000644000176200001440000001471312253717461024140 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud // Copyright (C) 2006-2008 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. // This file is a base class plugin containing common coefficient wise functions. 
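// For orientation, a minimal user-level sketch of what these members provide
// (illustrative only; it assumes <Eigen/Dense> is included):
//
//   Eigen::MatrixXcd m = Eigen::MatrixXcd::Random(2, 2);
//   Eigen::MatrixXcd c = m.conjugate();        // coefficient-wise complex conjugate
//   Eigen::MatrixXd  r = m.real();             // read-only view of the real part
//   Eigen::MatrixXf  f = r.cast<float>();      // cast the scalar type
//   Eigen::MatrixXd  s = -2.0 * r;             // scalar multiple (operators defined here)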
#ifndef EIGEN_PARSED_BY_DOXYGEN /** \internal Represents a scalar multiple of an expression */ typedef CwiseUnaryOp, const Derived> ScalarMultipleReturnType; /** \internal Represents a quotient of an expression by a scalar*/ typedef CwiseUnaryOp, const Derived> ScalarQuotient1ReturnType; /** \internal the return type of conjugate() */ typedef typename internal::conditional::IsComplex, const CwiseUnaryOp, const Derived>, const Derived& >::type ConjugateReturnType; /** \internal the return type of real() const */ typedef typename internal::conditional::IsComplex, const CwiseUnaryOp, const Derived>, const Derived& >::type RealReturnType; /** \internal the return type of real() */ typedef typename internal::conditional::IsComplex, CwiseUnaryView, Derived>, Derived& >::type NonConstRealReturnType; /** \internal the return type of imag() const */ typedef CwiseUnaryOp, const Derived> ImagReturnType; /** \internal the return type of imag() */ typedef CwiseUnaryView, Derived> NonConstImagReturnType; #endif // not EIGEN_PARSED_BY_DOXYGEN /** \returns an expression of the opposite of \c *this */ inline const CwiseUnaryOp::Scalar>, const Derived> operator-() const { return derived(); } /** \returns an expression of \c *this scaled by the scalar factor \a scalar */ inline const ScalarMultipleReturnType operator*(const Scalar& scalar) const { return CwiseUnaryOp, const Derived> (derived(), internal::scalar_multiple_op(scalar)); } #ifdef EIGEN_PARSED_BY_DOXYGEN const ScalarMultipleReturnType operator*(const RealScalar& scalar) const; #endif /** \returns an expression of \c *this divided by the scalar value \a scalar */ inline const CwiseUnaryOp::Scalar>, const Derived> operator/(const Scalar& scalar) const { return CwiseUnaryOp, const Derived> (derived(), internal::scalar_quotient1_op(scalar)); } /** Overloaded for efficient real matrix times complex scalar value */ inline const CwiseUnaryOp >, const Derived> operator*(const std::complex& scalar) const { return CwiseUnaryOp >, const Derived> (*static_cast(this), internal::scalar_multiple2_op >(scalar)); } inline friend const ScalarMultipleReturnType operator*(const Scalar& scalar, const StorageBaseType& matrix) { return matrix*scalar; } inline friend const CwiseUnaryOp >, const Derived> operator*(const std::complex& scalar, const StorageBaseType& matrix) { return matrix*scalar; } /** \returns an expression of *this with the \a Scalar type casted to * \a NewScalar. * * The template parameter \a NewScalar is the type we are casting the scalars to. * * \sa class CwiseUnaryOp */ template typename internal::cast_return_type::Scalar, NewType>, const Derived> >::type cast() const { return derived(); } /** \returns an expression of the complex conjugate of \c *this. * * \sa adjoint() */ inline ConjugateReturnType conjugate() const { return ConjugateReturnType(derived()); } /** \returns a read-only expression of the real part of \c *this. * * \sa imag() */ inline RealReturnType real() const { return derived(); } /** \returns an read-only expression of the imaginary part of \c *this. * * \sa real() */ inline const ImagReturnType imag() const { return derived(); } /** \brief Apply a unary operator coefficient-wise * \param[in] func Functor implementing the unary operator * \tparam CustomUnaryOp Type of \a func * \returns An expression of a custom coefficient-wise unary operator \a func of *this * * The function \c ptr_fun() from the C++ standard library can be used to make functors out of normal functions. 
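  * For instance (sketch, with a hypothetical free function \c clampToZero and
  * <functional> included for \c std::ptr_fun):
  * \code
  * double clampToZero(double x) { return x > 0.0 ? x : 0.0; }
  * // ...
  * Eigen::MatrixXd m = Eigen::MatrixXd::Random(3, 3);
  * Eigen::MatrixXd n = m.unaryExpr(std::ptr_fun(clampToZero));  // apply clampToZero to every coefficient
  * \endcode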
* * Example: * \include class_CwiseUnaryOp_ptrfun.cpp * Output: \verbinclude class_CwiseUnaryOp_ptrfun.out * * Genuine functors allow for more possibilities, for instance it may contain a state. * * Example: * \include class_CwiseUnaryOp.cpp * Output: \verbinclude class_CwiseUnaryOp.out * * \sa class CwiseUnaryOp, class CwiseBinaryOp */ template inline const CwiseUnaryOp unaryExpr(const CustomUnaryOp& func = CustomUnaryOp()) const { return CwiseUnaryOp(derived(), func); } /** \returns an expression of a custom coefficient-wise unary operator \a func of *this * * The template parameter \a CustomUnaryOp is the type of the functor * of the custom unary operator. * * Example: * \include class_CwiseUnaryOp.cpp * Output: \verbinclude class_CwiseUnaryOp.out * * \sa class CwiseUnaryOp, class CwiseBinaryOp */ template inline const CwiseUnaryView unaryViewExpr(const CustomViewOp& func = CustomViewOp()) const { return CwiseUnaryView(derived(), func); } /** \returns a non const expression of the real part of \c *this. * * \sa imag() */ inline NonConstRealReturnType real() { return derived(); } /** \returns a non const expression of the imaginary part of \c *this. * * \sa real() */ inline NonConstImagReturnType imag() { return derived(); } RcppEigen/inst/include/Eigen/src/plugins/BlockMethods.h0000644000176200001440000010312712305236565022567 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2010 Gael Guennebaud // Copyright (C) 2006-2010 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_PARSED_BY_DOXYGEN /** \internal expression type of a column */ typedef Block::RowsAtCompileTime, 1, !IsRowMajor> ColXpr; typedef const Block::RowsAtCompileTime, 1, !IsRowMajor> ConstColXpr; /** \internal expression type of a row */ typedef Block::ColsAtCompileTime, IsRowMajor> RowXpr; typedef const Block::ColsAtCompileTime, IsRowMajor> ConstRowXpr; /** \internal expression type of a block of whole columns */ typedef Block::RowsAtCompileTime, Dynamic, !IsRowMajor> ColsBlockXpr; typedef const Block::RowsAtCompileTime, Dynamic, !IsRowMajor> ConstColsBlockXpr; /** \internal expression type of a block of whole rows */ typedef Block::ColsAtCompileTime, IsRowMajor> RowsBlockXpr; typedef const Block::ColsAtCompileTime, IsRowMajor> ConstRowsBlockXpr; /** \internal expression type of a block of whole columns */ template struct NColsBlockXpr { typedef Block::RowsAtCompileTime, N, !IsRowMajor> Type; }; template struct ConstNColsBlockXpr { typedef const Block::RowsAtCompileTime, N, !IsRowMajor> Type; }; /** \internal expression type of a block of whole rows */ template struct NRowsBlockXpr { typedef Block::ColsAtCompileTime, IsRowMajor> Type; }; template struct ConstNRowsBlockXpr { typedef const Block::ColsAtCompileTime, IsRowMajor> Type; }; typedef VectorBlock SegmentReturnType; typedef const VectorBlock ConstSegmentReturnType; template struct FixedSegmentReturnType { typedef VectorBlock Type; }; template struct ConstFixedSegmentReturnType { typedef const VectorBlock Type; }; #endif // not EIGEN_PARSED_BY_DOXYGEN /** \returns a dynamic-size expression of a block in *this. 
* * \param startRow the first row in the block * \param startCol the first column in the block * \param blockRows the number of rows in the block * \param blockCols the number of columns in the block * * Example: \include MatrixBase_block_int_int_int_int.cpp * Output: \verbinclude MatrixBase_block_int_int_int_int.out * * \note Even though the returned expression has dynamic size, in the case * when it is applied to a fixed-size matrix, it inherits a fixed maximal size, * which means that evaluating it does not cause a dynamic memory allocation. * * \sa class Block, block(Index,Index) */ inline Block block(Index startRow, Index startCol, Index blockRows, Index blockCols) { return Block(derived(), startRow, startCol, blockRows, blockCols); } /** This is the const version of block(Index,Index,Index,Index). */ inline const Block block(Index startRow, Index startCol, Index blockRows, Index blockCols) const { return Block(derived(), startRow, startCol, blockRows, blockCols); } /** \returns a dynamic-size expression of a top-right corner of *this. * * \param cRows the number of rows in the corner * \param cCols the number of columns in the corner * * Example: \include MatrixBase_topRightCorner_int_int.cpp * Output: \verbinclude MatrixBase_topRightCorner_int_int.out * * \sa class Block, block(Index,Index,Index,Index) */ inline Block topRightCorner(Index cRows, Index cCols) { return Block(derived(), 0, cols() - cCols, cRows, cCols); } /** This is the const version of topRightCorner(Index, Index).*/ inline const Block topRightCorner(Index cRows, Index cCols) const { return Block(derived(), 0, cols() - cCols, cRows, cCols); } /** \returns an expression of a fixed-size top-right corner of *this. * * \tparam CRows the number of rows in the corner * \tparam CCols the number of columns in the corner * * Example: \include MatrixBase_template_int_int_topRightCorner.cpp * Output: \verbinclude MatrixBase_template_int_int_topRightCorner.out * * \sa class Block, block(Index,Index) */ template inline Block topRightCorner() { return Block(derived(), 0, cols() - CCols); } /** This is the const version of topRightCorner().*/ template inline const Block topRightCorner() const { return Block(derived(), 0, cols() - CCols); } /** \returns an expression of a top-right corner of *this. * * \tparam CRows number of rows in corner as specified at compile-time * \tparam CCols number of columns in corner as specified at compile-time * \param cRows number of rows in corner as specified at run-time * \param cCols number of columns in corner as specified at run-time * * This function is mainly useful for corners where the number of rows is specified at compile-time * and the number of columns is specified at run-time, or vice versa. The compile-time and run-time * information should not contradict. In other words, \a cRows should equal \a CRows unless * \a CRows is \a Dynamic, and the same for the number of columns. * * Example: \include MatrixBase_template_int_int_topRightCorner_int_int.cpp * Output: \verbinclude MatrixBase_template_int_int_topRightCorner_int_int.out * * \sa class Block */ template inline Block topRightCorner(Index cRows, Index cCols) { return Block(derived(), 0, cols() - cCols, cRows, cCols); } /** This is the const version of topRightCorner(Index, Index).*/ template inline const Block topRightCorner(Index cRows, Index cCols) const { return Block(derived(), 0, cols() - cCols, cRows, cCols); } /** \returns a dynamic-size expression of a top-left corner of *this. 
* * \param cRows the number of rows in the corner * \param cCols the number of columns in the corner * * Example: \include MatrixBase_topLeftCorner_int_int.cpp * Output: \verbinclude MatrixBase_topLeftCorner_int_int.out * * \sa class Block, block(Index,Index,Index,Index) */ inline Block topLeftCorner(Index cRows, Index cCols) { return Block(derived(), 0, 0, cRows, cCols); } /** This is the const version of topLeftCorner(Index, Index).*/ inline const Block topLeftCorner(Index cRows, Index cCols) const { return Block(derived(), 0, 0, cRows, cCols); } /** \returns an expression of a fixed-size top-left corner of *this. * * The template parameters CRows and CCols are the number of rows and columns in the corner. * * Example: \include MatrixBase_template_int_int_topLeftCorner.cpp * Output: \verbinclude MatrixBase_template_int_int_topLeftCorner.out * * \sa class Block, block(Index,Index,Index,Index) */ template inline Block topLeftCorner() { return Block(derived(), 0, 0); } /** This is the const version of topLeftCorner().*/ template inline const Block topLeftCorner() const { return Block(derived(), 0, 0); } /** \returns an expression of a top-left corner of *this. * * \tparam CRows number of rows in corner as specified at compile-time * \tparam CCols number of columns in corner as specified at compile-time * \param cRows number of rows in corner as specified at run-time * \param cCols number of columns in corner as specified at run-time * * This function is mainly useful for corners where the number of rows is specified at compile-time * and the number of columns is specified at run-time, or vice versa. The compile-time and run-time * information should not contradict. In other words, \a cRows should equal \a CRows unless * \a CRows is \a Dynamic, and the same for the number of columns. * * Example: \include MatrixBase_template_int_int_topLeftCorner_int_int.cpp * Output: \verbinclude MatrixBase_template_int_int_topLeftCorner_int_int.out * * \sa class Block */ template inline Block topLeftCorner(Index cRows, Index cCols) { return Block(derived(), 0, 0, cRows, cCols); } /** This is the const version of topLeftCorner(Index, Index).*/ template inline const Block topLeftCorner(Index cRows, Index cCols) const { return Block(derived(), 0, 0, cRows, cCols); } /** \returns a dynamic-size expression of a bottom-right corner of *this. * * \param cRows the number of rows in the corner * \param cCols the number of columns in the corner * * Example: \include MatrixBase_bottomRightCorner_int_int.cpp * Output: \verbinclude MatrixBase_bottomRightCorner_int_int.out * * \sa class Block, block(Index,Index,Index,Index) */ inline Block bottomRightCorner(Index cRows, Index cCols) { return Block(derived(), rows() - cRows, cols() - cCols, cRows, cCols); } /** This is the const version of bottomRightCorner(Index, Index).*/ inline const Block bottomRightCorner(Index cRows, Index cCols) const { return Block(derived(), rows() - cRows, cols() - cCols, cRows, cCols); } /** \returns an expression of a fixed-size bottom-right corner of *this. * * The template parameters CRows and CCols are the number of rows and columns in the corner. 
* * Example: \include MatrixBase_template_int_int_bottomRightCorner.cpp * Output: \verbinclude MatrixBase_template_int_int_bottomRightCorner.out * * \sa class Block, block(Index,Index,Index,Index) */ template inline Block bottomRightCorner() { return Block(derived(), rows() - CRows, cols() - CCols); } /** This is the const version of bottomRightCorner().*/ template inline const Block bottomRightCorner() const { return Block(derived(), rows() - CRows, cols() - CCols); } /** \returns an expression of a bottom-right corner of *this. * * \tparam CRows number of rows in corner as specified at compile-time * \tparam CCols number of columns in corner as specified at compile-time * \param cRows number of rows in corner as specified at run-time * \param cCols number of columns in corner as specified at run-time * * This function is mainly useful for corners where the number of rows is specified at compile-time * and the number of columns is specified at run-time, or vice versa. The compile-time and run-time * information should not contradict. In other words, \a cRows should equal \a CRows unless * \a CRows is \a Dynamic, and the same for the number of columns. * * Example: \include MatrixBase_template_int_int_bottomRightCorner_int_int.cpp * Output: \verbinclude MatrixBase_template_int_int_bottomRightCorner_int_int.out * * \sa class Block */ template inline Block bottomRightCorner(Index cRows, Index cCols) { return Block(derived(), rows() - cRows, cols() - cCols, cRows, cCols); } /** This is the const version of bottomRightCorner(Index, Index).*/ template inline const Block bottomRightCorner(Index cRows, Index cCols) const { return Block(derived(), rows() - cRows, cols() - cCols, cRows, cCols); } /** \returns a dynamic-size expression of a bottom-left corner of *this. * * \param cRows the number of rows in the corner * \param cCols the number of columns in the corner * * Example: \include MatrixBase_bottomLeftCorner_int_int.cpp * Output: \verbinclude MatrixBase_bottomLeftCorner_int_int.out * * \sa class Block, block(Index,Index,Index,Index) */ inline Block bottomLeftCorner(Index cRows, Index cCols) { return Block(derived(), rows() - cRows, 0, cRows, cCols); } /** This is the const version of bottomLeftCorner(Index, Index).*/ inline const Block bottomLeftCorner(Index cRows, Index cCols) const { return Block(derived(), rows() - cRows, 0, cRows, cCols); } /** \returns an expression of a fixed-size bottom-left corner of *this. * * The template parameters CRows and CCols are the number of rows and columns in the corner. * * Example: \include MatrixBase_template_int_int_bottomLeftCorner.cpp * Output: \verbinclude MatrixBase_template_int_int_bottomLeftCorner.out * * \sa class Block, block(Index,Index,Index,Index) */ template inline Block bottomLeftCorner() { return Block(derived(), rows() - CRows, 0); } /** This is the const version of bottomLeftCorner().*/ template inline const Block bottomLeftCorner() const { return Block(derived(), rows() - CRows, 0); } /** \returns an expression of a bottom-left corner of *this. * * \tparam CRows number of rows in corner as specified at compile-time * \tparam CCols number of columns in corner as specified at compile-time * \param cRows number of rows in corner as specified at run-time * \param cCols number of columns in corner as specified at run-time * * This function is mainly useful for corners where the number of rows is specified at compile-time * and the number of columns is specified at run-time, or vice versa. 
The compile-time and run-time * information should not contradict. In other words, \a cRows should equal \a CRows unless * \a CRows is \a Dynamic, and the same for the number of columns. * * Example: \include MatrixBase_template_int_int_bottomLeftCorner_int_int.cpp * Output: \verbinclude MatrixBase_template_int_int_bottomLeftCorner_int_int.out * * \sa class Block */ template inline Block bottomLeftCorner(Index cRows, Index cCols) { return Block(derived(), rows() - cRows, 0, cRows, cCols); } /** This is the const version of bottomLeftCorner(Index, Index).*/ template inline const Block bottomLeftCorner(Index cRows, Index cCols) const { return Block(derived(), rows() - cRows, 0, cRows, cCols); } /** \returns a block consisting of the top rows of *this. * * \param n the number of rows in the block * * Example: \include MatrixBase_topRows_int.cpp * Output: \verbinclude MatrixBase_topRows_int.out * * \sa class Block, block(Index,Index,Index,Index) */ inline RowsBlockXpr topRows(Index n) { return RowsBlockXpr(derived(), 0, 0, n, cols()); } /** This is the const version of topRows(Index).*/ inline ConstRowsBlockXpr topRows(Index n) const { return ConstRowsBlockXpr(derived(), 0, 0, n, cols()); } /** \returns a block consisting of the top rows of *this. * * \tparam N the number of rows in the block as specified at compile-time * \param n the number of rows in the block as specified at run-time * * The compile-time and run-time information should not contradict. In other words, * \a n should equal \a N unless \a N is \a Dynamic. * * Example: \include MatrixBase_template_int_topRows.cpp * Output: \verbinclude MatrixBase_template_int_topRows.out * * \sa class Block, block(Index,Index,Index,Index) */ template inline typename NRowsBlockXpr::Type topRows(Index n = N) { return typename NRowsBlockXpr::Type(derived(), 0, 0, n, cols()); } /** This is the const version of topRows().*/ template inline typename ConstNRowsBlockXpr::Type topRows(Index n = N) const { return typename ConstNRowsBlockXpr::Type(derived(), 0, 0, n, cols()); } /** \returns a block consisting of the bottom rows of *this. * * \param n the number of rows in the block * * Example: \include MatrixBase_bottomRows_int.cpp * Output: \verbinclude MatrixBase_bottomRows_int.out * * \sa class Block, block(Index,Index,Index,Index) */ inline RowsBlockXpr bottomRows(Index n) { return RowsBlockXpr(derived(), rows() - n, 0, n, cols()); } /** This is the const version of bottomRows(Index).*/ inline ConstRowsBlockXpr bottomRows(Index n) const { return ConstRowsBlockXpr(derived(), rows() - n, 0, n, cols()); } /** \returns a block consisting of the bottom rows of *this. * * \tparam N the number of rows in the block as specified at compile-time * \param n the number of rows in the block as specified at run-time * * The compile-time and run-time information should not contradict. In other words, * \a n should equal \a N unless \a N is \a Dynamic. * * Example: \include MatrixBase_template_int_bottomRows.cpp * Output: \verbinclude MatrixBase_template_int_bottomRows.out * * \sa class Block, block(Index,Index,Index,Index) */ template inline typename NRowsBlockXpr::Type bottomRows(Index n = N) { return typename NRowsBlockXpr::Type(derived(), rows() - n, 0, n, cols()); } /** This is the const version of bottomRows().*/ template inline typename ConstNRowsBlockXpr::Type bottomRows(Index n = N) const { return typename ConstNRowsBlockXpr::Type(derived(), rows() - n, 0, n, cols()); } /** \returns a block consisting of a range of rows of *this. 
* * \param startRow the index of the first row in the block * \param n the number of rows in the block * * Example: \include DenseBase_middleRows_int.cpp * Output: \verbinclude DenseBase_middleRows_int.out * * \sa class Block, block(Index,Index,Index,Index) */ inline RowsBlockXpr middleRows(Index startRow, Index n) { return RowsBlockXpr(derived(), startRow, 0, n, cols()); } /** This is the const version of middleRows(Index,Index).*/ inline ConstRowsBlockXpr middleRows(Index startRow, Index n) const { return ConstRowsBlockXpr(derived(), startRow, 0, n, cols()); } /** \returns a block consisting of a range of rows of *this. * * \tparam N the number of rows in the block as specified at compile-time * \param startRow the index of the first row in the block * \param n the number of rows in the block as specified at run-time * * The compile-time and run-time information should not contradict. In other words, * \a n should equal \a N unless \a N is \a Dynamic. * * Example: \include DenseBase_template_int_middleRows.cpp * Output: \verbinclude DenseBase_template_int_middleRows.out * * \sa class Block, block(Index,Index,Index,Index) */ template inline typename NRowsBlockXpr::Type middleRows(Index startRow, Index n = N) { return typename NRowsBlockXpr::Type(derived(), startRow, 0, n, cols()); } /** This is the const version of middleRows().*/ template inline typename ConstNRowsBlockXpr::Type middleRows(Index startRow, Index n = N) const { return typename ConstNRowsBlockXpr::Type(derived(), startRow, 0, n, cols()); } /** \returns a block consisting of the left columns of *this. * * \param n the number of columns in the block * * Example: \include MatrixBase_leftCols_int.cpp * Output: \verbinclude MatrixBase_leftCols_int.out * * \sa class Block, block(Index,Index,Index,Index) */ inline ColsBlockXpr leftCols(Index n) { return ColsBlockXpr(derived(), 0, 0, rows(), n); } /** This is the const version of leftCols(Index).*/ inline ConstColsBlockXpr leftCols(Index n) const { return ConstColsBlockXpr(derived(), 0, 0, rows(), n); } /** \returns a block consisting of the left columns of *this. * * \tparam N the number of columns in the block as specified at compile-time * \param n the number of columns in the block as specified at run-time * * The compile-time and run-time information should not contradict. In other words, * \a n should equal \a N unless \a N is \a Dynamic. * * Example: \include MatrixBase_template_int_leftCols.cpp * Output: \verbinclude MatrixBase_template_int_leftCols.out * * \sa class Block, block(Index,Index,Index,Index) */ template inline typename NColsBlockXpr::Type leftCols(Index n = N) { return typename NColsBlockXpr::Type(derived(), 0, 0, rows(), n); } /** This is the const version of leftCols().*/ template inline typename ConstNColsBlockXpr::Type leftCols(Index n = N) const { return typename ConstNColsBlockXpr::Type(derived(), 0, 0, rows(), n); } /** \returns a block consisting of the right columns of *this. * * \param n the number of columns in the block * * Example: \include MatrixBase_rightCols_int.cpp * Output: \verbinclude MatrixBase_rightCols_int.out * * \sa class Block, block(Index,Index,Index,Index) */ inline ColsBlockXpr rightCols(Index n) { return ColsBlockXpr(derived(), 0, cols() - n, rows(), n); } /** This is the const version of rightCols(Index).*/ inline ConstColsBlockXpr rightCols(Index n) const { return ConstColsBlockXpr(derived(), 0, cols() - n, rows(), n); } /** \returns a block consisting of the right columns of *this. 
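 *
 * An illustrative sketch (not one of the shipped Eigen example files) of the column-block
 * accessors documented above, assuming a 4x6 double matrix \c m:
 * \code
 * Eigen::MatrixXd m = Eigen::MatrixXd::Random(4, 6);
 * Eigen::RowVectorXd colSums = m.leftCols(3).colwise().sum();  // sums of the first three columns
 * m.rightCols<2>().setConstant(1.0);                           // last two columns, fixed size
 * \endcode
 *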
* * \tparam N the number of columns in the block as specified at compile-time * \param n the number of columns in the block as specified at run-time * * The compile-time and run-time information should not contradict. In other words, * \a n should equal \a N unless \a N is \a Dynamic. * * Example: \include MatrixBase_template_int_rightCols.cpp * Output: \verbinclude MatrixBase_template_int_rightCols.out * * \sa class Block, block(Index,Index,Index,Index) */ template inline typename NColsBlockXpr::Type rightCols(Index n = N) { return typename NColsBlockXpr::Type(derived(), 0, cols() - n, rows(), n); } /** This is the const version of rightCols().*/ template inline typename ConstNColsBlockXpr::Type rightCols(Index n = N) const { return typename ConstNColsBlockXpr::Type(derived(), 0, cols() - n, rows(), n); } /** \returns a block consisting of a range of columns of *this. * * \param startCol the index of the first column in the block * \param numCols the number of columns in the block * * Example: \include DenseBase_middleCols_int.cpp * Output: \verbinclude DenseBase_middleCols_int.out * * \sa class Block, block(Index,Index,Index,Index) */ inline ColsBlockXpr middleCols(Index startCol, Index numCols) { return ColsBlockXpr(derived(), 0, startCol, rows(), numCols); } /** This is the const version of middleCols(Index,Index).*/ inline ConstColsBlockXpr middleCols(Index startCol, Index numCols) const { return ConstColsBlockXpr(derived(), 0, startCol, rows(), numCols); } /** \returns a block consisting of a range of columns of *this. * * \tparam N the number of columns in the block as specified at compile-time * \param startCol the index of the first column in the block * \param n the number of columns in the block as specified at run-time * * The compile-time and run-time information should not contradict. In other words, * \a n should equal \a N unless \a N is \a Dynamic. * * Example: \include DenseBase_template_int_middleCols.cpp * Output: \verbinclude DenseBase_template_int_middleCols.out * * \sa class Block, block(Index,Index,Index,Index) */ template inline typename NColsBlockXpr::Type middleCols(Index startCol, Index n = N) { return typename NColsBlockXpr::Type(derived(), 0, startCol, rows(), n); } /** This is the const version of middleCols().*/ template inline typename ConstNColsBlockXpr::Type middleCols(Index startCol, Index n = N) const { return typename ConstNColsBlockXpr::Type(derived(), 0, startCol, rows(), n); } /** \returns a fixed-size expression of a block in *this. * * The template parameters \a BlockRows and \a BlockCols are the number of * rows and columns in the block. * * \param startRow the first row in the block * \param startCol the first column in the block * * Example: \include MatrixBase_block_int_int.cpp * Output: \verbinclude MatrixBase_block_int_int.out * * \note since block is a templated member, the keyword template has to be used * if the matrix type is also a template parameter: \code m.template block<3,3>(1,1); \endcode * * \sa class Block, block(Index,Index,Index,Index) */ template inline Block block(Index startRow, Index startCol) { return Block(derived(), startRow, startCol); } /** This is the const version of block<>(Index, Index). */ template inline const Block block(Index startRow, Index startCol) const { return Block(derived(), startRow, startCol); } /** \returns an expression of a block in *this. 
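 *
 * An illustrative sketch (not one of the shipped Eigen example files) of the block
 * accessors documented above, assuming a 5x5 double matrix \c m:
 * \code
 * Eigen::MatrixXd m = Eigen::MatrixXd::Random(5, 5);
 * Eigen::Matrix3d f = m.block<3, 3>(1, 1);   // fixed-size 3x3 block starting at (1,1)
 * Eigen::MatrixXd d = m.block(0, 2, 4, 2);   // dynamic-size 4x2 block starting at (0,2)
 * Eigen::MatrixXd c = m.middleCols(1, 3);    // columns 1, 2 and 3
 * \endcode
 *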
* * \tparam BlockRows number of rows in block as specified at compile-time * \tparam BlockCols number of columns in block as specified at compile-time * \param startRow the first row in the block * \param startCol the first column in the block * \param blockRows number of rows in block as specified at run-time * \param blockCols number of columns in block as specified at run-time * * This function is mainly useful for blocks where the number of rows is specified at compile-time * and the number of columns is specified at run-time, or vice versa. The compile-time and run-time * information should not contradict. In other words, \a blockRows should equal \a BlockRows unless * \a BlockRows is \a Dynamic, and the same for the number of columns. * * Example: \include MatrixBase_template_int_int_block_int_int_int_int.cpp * Output: \verbinclude MatrixBase_template_int_int_block_int_int_int_int.cpp * * \sa class Block, block(Index,Index,Index,Index) */ template inline Block block(Index startRow, Index startCol, Index blockRows, Index blockCols) { return Block(derived(), startRow, startCol, blockRows, blockCols); } /** This is the const version of block<>(Index, Index, Index, Index). */ template inline const Block block(Index startRow, Index startCol, Index blockRows, Index blockCols) const { return Block(derived(), startRow, startCol, blockRows, blockCols); } /** \returns an expression of the \a i-th column of *this. Note that the numbering starts at 0. * * Example: \include MatrixBase_col.cpp * Output: \verbinclude MatrixBase_col.out * * \sa row(), class Block */ inline ColXpr col(Index i) { return ColXpr(derived(), i); } /** This is the const version of col(). */ inline ConstColXpr col(Index i) const { return ConstColXpr(derived(), i); } /** \returns an expression of the \a i-th row of *this. Note that the numbering starts at 0. * * Example: \include MatrixBase_row.cpp * Output: \verbinclude MatrixBase_row.out * * \sa col(), class Block */ inline RowXpr row(Index i) { return RowXpr(derived(), i); } /** This is the const version of row(). */ inline ConstRowXpr row(Index i) const { return ConstRowXpr(derived(), i); } /** \returns a dynamic-size expression of a segment (i.e. a vector block) in *this. * * \only_for_vectors * * \param start the first coefficient in the segment * \param n the number of coefficients in the segment * * Example: \include MatrixBase_segment_int_int.cpp * Output: \verbinclude MatrixBase_segment_int_int.out * * \note Even though the returned expression has dynamic size, in the case * when it is applied to a fixed-size vector, it inherits a fixed maximal size, * which means that evaluating it does not cause a dynamic memory allocation. * * \sa class Block, segment(Index) */ inline SegmentReturnType segment(Index start, Index n) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return SegmentReturnType(derived(), start, n); } /** This is the const version of segment(Index,Index).*/ inline ConstSegmentReturnType segment(Index start, Index n) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return ConstSegmentReturnType(derived(), start, n); } /** \returns a dynamic-size expression of the first coefficients of *this. 
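 *
 * An illustrative sketch (not one of the shipped Eigen example files) of col(), row()
 * and segment(), assuming a 4x4 matrix \c m and a length-6 vector \c v:
 * \code
 * Eigen::MatrixXd m = Eigen::MatrixXd::Random(4, 4);
 * Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(6, 0.0, 5.0);
 * m.col(0).setOnes();                  // first column (0-based numbering)
 * double r1sum = m.row(1).sum();       // sum of the second row
 * Eigen::VectorXd s = v.segment(1, 3); // coefficients v(1), v(2), v(3)
 * \endcode
 *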
* * \only_for_vectors * * \param n the number of coefficients in the segment * * Example: \include MatrixBase_start_int.cpp * Output: \verbinclude MatrixBase_start_int.out * * \note Even though the returned expression has dynamic size, in the case * when it is applied to a fixed-size vector, it inherits a fixed maximal size, * which means that evaluating it does not cause a dynamic memory allocation. * * \sa class Block, block(Index,Index) */ inline SegmentReturnType head(Index n) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return SegmentReturnType(derived(), 0, n); } /** This is the const version of head(Index).*/ inline ConstSegmentReturnType head(Index n) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return ConstSegmentReturnType(derived(), 0, n); } /** \returns a dynamic-size expression of the last coefficients of *this. * * \only_for_vectors * * \param n the number of coefficients in the segment * * Example: \include MatrixBase_end_int.cpp * Output: \verbinclude MatrixBase_end_int.out * * \note Even though the returned expression has dynamic size, in the case * when it is applied to a fixed-size vector, it inherits a fixed maximal size, * which means that evaluating it does not cause a dynamic memory allocation. * * \sa class Block, block(Index,Index) */ inline SegmentReturnType tail(Index n) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return SegmentReturnType(derived(), this->size() - n, n); } /** This is the const version of tail(Index).*/ inline ConstSegmentReturnType tail(Index n) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return ConstSegmentReturnType(derived(), this->size() - n, n); } /** \returns a fixed-size expression of a segment (i.e. a vector block) in \c *this * * \only_for_vectors * * \tparam N the number of coefficients in the segment as specified at compile-time * \param start the index of the first element in the segment * \param n the number of coefficients in the segment as specified at compile-time * * The compile-time and run-time information should not contradict. In other words, * \a n should equal \a N unless \a N is \a Dynamic. * * Example: \include MatrixBase_template_int_segment.cpp * Output: \verbinclude MatrixBase_template_int_segment.out * * \sa class Block */ template inline typename FixedSegmentReturnType::Type segment(Index start, Index n = N) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return typename FixedSegmentReturnType::Type(derived(), start, n); } /** This is the const version of segment(Index).*/ template inline typename ConstFixedSegmentReturnType::Type segment(Index start, Index n = N) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return typename ConstFixedSegmentReturnType::Type(derived(), start, n); } /** \returns a fixed-size expression of the first coefficients of *this. * * \only_for_vectors * * \tparam N the number of coefficients in the segment as specified at compile-time * \param n the number of coefficients in the segment as specified at run-time * * The compile-time and run-time information should not contradict. In other words, * \a n should equal \a N unless \a N is \a Dynamic. 
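 *
 * An illustrative sketch (not one of the shipped Eigen example files) of the vector
 * segment accessors documented above, assuming a length-6 double vector \c v:
 * \code
 * Eigen::VectorXd v = Eigen::VectorXd::Random(6);
 * v.head(2).setZero();                 // first two coefficients, run-time size
 * Eigen::Vector3d t = v.tail<3>();     // last three coefficients, compile-time size
 * Eigen::Vector2d s = v.segment<2>(1); // coefficients v(1) and v(2)
 * \endcode
 *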
* * Example: \include MatrixBase_template_int_start.cpp * Output: \verbinclude MatrixBase_template_int_start.out * * \sa class Block */ template inline typename FixedSegmentReturnType::Type head(Index n = N) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return typename FixedSegmentReturnType::Type(derived(), 0, n); } /** This is the const version of head().*/ template inline typename ConstFixedSegmentReturnType::Type head(Index n = N) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return typename ConstFixedSegmentReturnType::Type(derived(), 0, n); } /** \returns a fixed-size expression of the last coefficients of *this. * * \only_for_vectors * * \tparam N the number of coefficients in the segment as specified at compile-time * \param n the number of coefficients in the segment as specified at run-time * * The compile-time and run-time information should not contradict. In other words, * \a n should equal \a N unless \a N is \a Dynamic. * * Example: \include MatrixBase_template_int_end.cpp * Output: \verbinclude MatrixBase_template_int_end.out * * \sa class Block */ template inline typename FixedSegmentReturnType::Type tail(Index n = N) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return typename FixedSegmentReturnType::Type(derived(), size() - n); } /** This is the const version of tail.*/ template inline typename ConstFixedSegmentReturnType::Type tail(Index n = N) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return typename ConstFixedSegmentReturnType::Type(derived(), size() - n); } RcppEigen/inst/include/Eigen/src/plugins/MatrixCwiseUnaryOps.h0000644000176200001440000000351312550462273024146 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud // Copyright (C) 2006-2008 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. // This file is a base class plugin containing matrix specifics coefficient wise functions. /** \returns an expression of the coefficient-wise absolute value of \c *this * * Example: \include MatrixBase_cwiseAbs.cpp * Output: \verbinclude MatrixBase_cwiseAbs.out * * \sa cwiseAbs2() */ EIGEN_STRONG_INLINE const CwiseUnaryOp, const Derived> cwiseAbs() const { return derived(); } /** \returns an expression of the coefficient-wise squared absolute value of \c *this * * Example: \include MatrixBase_cwiseAbs2.cpp * Output: \verbinclude MatrixBase_cwiseAbs2.out * * \sa cwiseAbs() */ EIGEN_STRONG_INLINE const CwiseUnaryOp, const Derived> cwiseAbs2() const { return derived(); } /** \returns an expression of the coefficient-wise square root of *this. * * Example: \include MatrixBase_cwiseSqrt.cpp * Output: \verbinclude MatrixBase_cwiseSqrt.out * * \sa cwisePow(), cwiseSquare() */ inline const CwiseUnaryOp, const Derived> cwiseSqrt() const { return derived(); } /** \returns an expression of the coefficient-wise inverse of *this. 
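 *
 * An illustrative sketch (not one of the shipped Eigen example files) of the
 * coefficient-wise Matrix operations declared in this plugin:
 * \code
 * Eigen::MatrixXd m = Eigen::MatrixXd::Random(3, 3);
 * Eigen::MatrixXd a = m.cwiseAbs();              // |m(i,j)|
 * Eigen::MatrixXd b = m.cwiseAbs2();             // m(i,j)^2
 * Eigen::MatrixXd c = m.cwiseAbs().cwiseSqrt();  // sqrt(|m(i,j)|)
 * Eigen::MatrixXd d = m.cwiseInverse();          // 1 / m(i,j)
 * \endcode
 *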
* * Example: \include MatrixBase_cwiseInverse.cpp * Output: \verbinclude MatrixBase_cwiseInverse.out * * \sa cwiseProduct() */ inline const CwiseUnaryOp, const Derived> cwiseInverse() const { return derived(); } RcppEigen/inst/include/Eigen/src/plugins/ArrayCwiseUnaryOps.h0000644000176200001440000001037612550462273023765 0ustar liggesusers /** \returns an expression of the coefficient-wise absolute value of \c *this * * Example: \include Cwise_abs.cpp * Output: \verbinclude Cwise_abs.out * * \sa abs2() */ EIGEN_STRONG_INLINE const CwiseUnaryOp, const Derived> abs() const { return derived(); } /** \returns an expression of the coefficient-wise squared absolute value of \c *this * * Example: \include Cwise_abs2.cpp * Output: \verbinclude Cwise_abs2.out * * \sa abs(), square() */ EIGEN_STRONG_INLINE const CwiseUnaryOp, const Derived> abs2() const { return derived(); } /** \returns an expression of the coefficient-wise exponential of *this. * * Example: \include Cwise_exp.cpp * Output: \verbinclude Cwise_exp.out * * \sa pow(), log(), sin(), cos() */ inline const CwiseUnaryOp, const Derived> exp() const { return derived(); } /** \returns an expression of the coefficient-wise logarithm of *this. * * Example: \include Cwise_log.cpp * Output: \verbinclude Cwise_log.out * * \sa exp() */ inline const CwiseUnaryOp, const Derived> log() const { return derived(); } /** \returns an expression of the coefficient-wise square root of *this. * * Example: \include Cwise_sqrt.cpp * Output: \verbinclude Cwise_sqrt.out * * \sa pow(), square() */ inline const CwiseUnaryOp, const Derived> sqrt() const { return derived(); } /** \returns an expression of the coefficient-wise cosine of *this. * * Example: \include Cwise_cos.cpp * Output: \verbinclude Cwise_cos.out * * \sa sin(), acos() */ inline const CwiseUnaryOp, const Derived> cos() const { return derived(); } /** \returns an expression of the coefficient-wise sine of *this. * * Example: \include Cwise_sin.cpp * Output: \verbinclude Cwise_sin.out * * \sa cos(), asin() */ inline const CwiseUnaryOp, const Derived> sin() const { return derived(); } /** \returns an expression of the coefficient-wise arc cosine of *this. * * Example: \include Cwise_acos.cpp * Output: \verbinclude Cwise_acos.out * * \sa cos(), asin() */ inline const CwiseUnaryOp, const Derived> acos() const { return derived(); } /** \returns an expression of the coefficient-wise arc sine of *this. * * Example: \include Cwise_asin.cpp * Output: \verbinclude Cwise_asin.out * * \sa sin(), acos() */ inline const CwiseUnaryOp, const Derived> asin() const { return derived(); } /** \returns an expression of the coefficient-wise tan of *this. * * Example: \include Cwise_tan.cpp * Output: \verbinclude Cwise_tan.out * * \sa cos(), sin() */ inline const CwiseUnaryOp, Derived> tan() const { return derived(); } /** \returns an expression of the coefficient-wise power of *this to the given exponent. * * Example: \include Cwise_pow.cpp * Output: \verbinclude Cwise_pow.out * * \sa exp(), log() */ inline const CwiseUnaryOp, const Derived> pow(const Scalar& exponent) const { return CwiseUnaryOp, const Derived> (derived(), internal::scalar_pow_op(exponent)); } /** \returns an expression of the coefficient-wise inverse of *this. * * Example: \include Cwise_inverse.cpp * Output: \verbinclude Cwise_inverse.out * * \sa operator/(), operator*() */ inline const CwiseUnaryOp, const Derived> inverse() const { return derived(); } /** \returns an expression of the coefficient-wise square of *this. 
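 *
 * An illustrative sketch (not one of the shipped Eigen example files) of the
 * coefficient-wise Array operations declared in this plugin:
 * \code
 * Eigen::ArrayXd a = Eigen::ArrayXd::LinSpaced(5, 0.0, 1.0);
 * Eigen::ArrayXd e = a.exp();           // exp(a(i))
 * Eigen::ArrayXd p = a.pow(3.0);        // a(i)^3
 * Eigen::ArrayXd s = (a + 1.0).sqrt();  // sqrt(a(i) + 1)
 * Eigen::ArrayXd q = a.square() + a.cube();
 * \endcode
 *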
* * Example: \include Cwise_square.cpp * Output: \verbinclude Cwise_square.out * * \sa operator/(), operator*(), abs2() */ inline const CwiseUnaryOp, const Derived> square() const { return derived(); } /** \returns an expression of the coefficient-wise cube of *this. * * Example: \include Cwise_cube.cpp * Output: \verbinclude Cwise_cube.out * * \sa square(), pow() */ inline const CwiseUnaryOp, const Derived> cube() const { return derived(); } RcppEigen/inst/include/Eigen/src/plugins/CommonCwiseBinaryOps.h0000644000176200001440000000362212253717461024263 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud // Copyright (C) 2006-2008 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. // This file is a base class plugin containing common coefficient wise functions. /** \returns an expression of the difference of \c *this and \a other * * \note If you want to substract a given scalar from all coefficients, see Cwise::operator-(). * * \sa class CwiseBinaryOp, operator-=() */ EIGEN_MAKE_CWISE_BINARY_OP(operator-,internal::scalar_difference_op) /** \returns an expression of the sum of \c *this and \a other * * \note If you want to add a given scalar to all coefficients, see Cwise::operator+(). * * \sa class CwiseBinaryOp, operator+=() */ EIGEN_MAKE_CWISE_BINARY_OP(operator+,internal::scalar_sum_op) /** \returns an expression of a custom coefficient-wise operator \a func of *this and \a other * * The template parameter \a CustomBinaryOp is the type of the functor * of the custom operator (see class CwiseBinaryOp for an example) * * Here is an example illustrating the use of custom functors: * \include class_CwiseBinaryOp.cpp * Output: \verbinclude class_CwiseBinaryOp.out * * \sa class CwiseBinaryOp, operator+(), operator-(), cwiseProduct() */ template EIGEN_STRONG_INLINE const CwiseBinaryOp binaryExpr(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other, const CustomBinaryOp& func = CustomBinaryOp()) const { return CwiseBinaryOp(derived(), other.derived(), func); } RcppEigen/inst/include/Eigen/src/Householder/0000755000176200001440000000000012375356050020633 5ustar liggesusersRcppEigen/inst/include/Eigen/src/Householder/HouseholderSequence.h0000644000176200001440000004534412253717461024772 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud // Copyright (C) 2010 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
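// An illustrative sketch (not part of the Eigen sources) of the coefficient-wise
// binary operators and binaryExpr() documented in CommonCwiseBinaryOps.h above.
// The functor name is hypothetical; Eigen on the include path is assumed.
#include <Eigen/Dense>

// Hypothetical functor taking the coefficient-wise minimum of two matrices.
struct CoeffMin {
  typedef double result_type;
  double operator()(double a, double b) const { return a < b ? a : b; }
};

inline void cwise_binary_demo() {
  Eigen::MatrixXd a = Eigen::MatrixXd::Random(3, 3);
  Eigen::MatrixXd b = Eigen::MatrixXd::Random(3, 3);
  Eigen::MatrixXd sum  = a + b;                       // scalar_sum_op
  Eigen::MatrixXd diff = a - b;                       // scalar_difference_op
  Eigen::MatrixXd mn   = a.binaryExpr(b, CoeffMin()); // custom coefficient-wise functor
}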
#ifndef EIGEN_HOUSEHOLDER_SEQUENCE_H #define EIGEN_HOUSEHOLDER_SEQUENCE_H namespace Eigen { /** \ingroup Householder_Module * \householder_module * \class HouseholderSequence * \brief Sequence of Householder reflections acting on subspaces with decreasing size * \tparam VectorsType type of matrix containing the Householder vectors * \tparam CoeffsType type of vector containing the Householder coefficients * \tparam Side either OnTheLeft (the default) or OnTheRight * * This class represents a product sequence of Householder reflections where the first Householder reflection * acts on the whole space, the second Householder reflection leaves the one-dimensional subspace spanned by * the first unit vector invariant, the third Householder reflection leaves the two-dimensional subspace * spanned by the first two unit vectors invariant, and so on up to the last reflection which leaves all but * one dimensions invariant and acts only on the last dimension. Such sequences of Householder reflections * are used in several algorithms to zero out certain parts of a matrix. Indeed, the methods * HessenbergDecomposition::matrixQ(), Tridiagonalization::matrixQ(), HouseholderQR::householderQ(), * and ColPivHouseholderQR::householderQ() all return a %HouseholderSequence. * * More precisely, the class %HouseholderSequence represents an \f$ n \times n \f$ matrix \f$ H \f$ of the * form \f$ H = \prod_{i=0}^{n-1} H_i \f$ where the i-th Householder reflection is \f$ H_i = I - h_i v_i * v_i^* \f$. The i-th Householder coefficient \f$ h_i \f$ is a scalar and the i-th Householder vector \f$ * v_i \f$ is a vector of the form * \f[ * v_i = [\underbrace{0, \ldots, 0}_{i-1\mbox{ zeros}}, 1, \underbrace{*, \ldots,*}_{n-i\mbox{ arbitrary entries}} ]. * \f] * The last \f$ n-i \f$ entries of \f$ v_i \f$ are called the essential part of the Householder vector. * * Typical usages are listed below, where H is a HouseholderSequence: * \code * A.applyOnTheRight(H); // A = A * H * A.applyOnTheLeft(H); // A = H * A * A.applyOnTheRight(H.adjoint()); // A = A * H^* * A.applyOnTheLeft(H.adjoint()); // A = H^* * A * MatrixXd Q = H; // conversion to a dense matrix * \endcode * In addition to the adjoint, you can also apply the inverse (=adjoint), the transpose, and the conjugate operators. * * See the documentation for HouseholderSequence(const VectorsType&, const CoeffsType&) for an example. * * \sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight() */ namespace internal { template struct traits > { typedef typename VectorsType::Scalar Scalar; typedef typename VectorsType::Index Index; typedef typename VectorsType::StorageKind StorageKind; enum { RowsAtCompileTime = Side==OnTheLeft ? traits::RowsAtCompileTime : traits::ColsAtCompileTime, ColsAtCompileTime = RowsAtCompileTime, MaxRowsAtCompileTime = Side==OnTheLeft ? 
traits::MaxRowsAtCompileTime : traits::MaxColsAtCompileTime, MaxColsAtCompileTime = MaxRowsAtCompileTime, Flags = 0 }; }; template struct hseq_side_dependent_impl { typedef Block EssentialVectorType; typedef HouseholderSequence HouseholderSequenceType; typedef typename VectorsType::Index Index; static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k) { Index start = k+1+h.m_shift; return Block(h.m_vectors, start, k, h.rows()-start, 1); } }; template struct hseq_side_dependent_impl { typedef Transpose > EssentialVectorType; typedef HouseholderSequence HouseholderSequenceType; typedef typename VectorsType::Index Index; static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k) { Index start = k+1+h.m_shift; return Block(h.m_vectors, k, start, 1, h.rows()-start).transpose(); } }; template struct matrix_type_times_scalar_type { typedef typename scalar_product_traits::ReturnType ResultScalar; typedef Matrix Type; }; } // end namespace internal template class HouseholderSequence : public EigenBase > { typedef typename internal::hseq_side_dependent_impl::EssentialVectorType EssentialVectorType; public: enum { RowsAtCompileTime = internal::traits::RowsAtCompileTime, ColsAtCompileTime = internal::traits::ColsAtCompileTime, MaxRowsAtCompileTime = internal::traits::MaxRowsAtCompileTime, MaxColsAtCompileTime = internal::traits::MaxColsAtCompileTime }; typedef typename internal::traits::Scalar Scalar; typedef typename VectorsType::Index Index; typedef HouseholderSequence< typename internal::conditional::IsComplex, typename internal::remove_all::type, VectorsType>::type, typename internal::conditional::IsComplex, typename internal::remove_all::type, CoeffsType>::type, Side > ConjugateReturnType; /** \brief Constructor. * \param[in] v %Matrix containing the essential parts of the Householder vectors * \param[in] h Vector containing the Householder coefficients * * Constructs the Householder sequence with coefficients given by \p h and vectors given by \p v. The * i-th Householder coefficient \f$ h_i \f$ is given by \p h(i) and the essential part of the i-th * Householder vector \f$ v_i \f$ is given by \p v(k,i) with \p k > \p i (the subdiagonal part of the * i-th column). If \p v has fewer columns than rows, then the Householder sequence contains as many * Householder reflections as there are columns. * * \note The %HouseholderSequence object stores \p v and \p h by reference. * * Example: \include HouseholderSequence_HouseholderSequence.cpp * Output: \verbinclude HouseholderSequence_HouseholderSequence.out * * \sa setLength(), setShift() */ HouseholderSequence(const VectorsType& v, const CoeffsType& h) : m_vectors(v), m_coeffs(h), m_trans(false), m_length(v.diagonalSize()), m_shift(0) { } /** \brief Copy constructor. */ HouseholderSequence(const HouseholderSequence& other) : m_vectors(other.m_vectors), m_coeffs(other.m_coeffs), m_trans(other.m_trans), m_length(other.m_length), m_shift(other.m_shift) { } /** \brief Number of rows of transformation viewed as a matrix. * \returns Number of rows * \details This equals the dimension of the space that the transformation acts on. */ Index rows() const { return Side==OnTheLeft ? m_vectors.rows() : m_vectors.cols(); } /** \brief Number of columns of transformation viewed as a matrix. * \returns Number of columns * \details This equals the dimension of the space that the transformation acts on. 
*/ Index cols() const { return rows(); } /** \brief Essential part of a Householder vector. * \param[in] k Index of Householder reflection * \returns Vector containing non-trivial entries of k-th Householder vector * * This function returns the essential part of the Householder vector \f$ v_i \f$. This is a vector of * length \f$ n-i \f$ containing the last \f$ n-i \f$ entries of the vector * \f[ * v_i = [\underbrace{0, \ldots, 0}_{i-1\mbox{ zeros}}, 1, \underbrace{*, \ldots,*}_{n-i\mbox{ arbitrary entries}} ]. * \f] * The index \f$ i \f$ equals \p k + shift(), corresponding to the k-th column of the matrix \p v * passed to the constructor. * * \sa setShift(), shift() */ const EssentialVectorType essentialVector(Index k) const { eigen_assert(k >= 0 && k < m_length); return internal::hseq_side_dependent_impl::essentialVector(*this, k); } /** \brief %Transpose of the Householder sequence. */ HouseholderSequence transpose() const { return HouseholderSequence(*this).setTrans(!m_trans); } /** \brief Complex conjugate of the Householder sequence. */ ConjugateReturnType conjugate() const { return ConjugateReturnType(m_vectors.conjugate(), m_coeffs.conjugate()) .setTrans(m_trans) .setLength(m_length) .setShift(m_shift); } /** \brief Adjoint (conjugate transpose) of the Householder sequence. */ ConjugateReturnType adjoint() const { return conjugate().setTrans(!m_trans); } /** \brief Inverse of the Householder sequence (equals the adjoint). */ ConjugateReturnType inverse() const { return adjoint(); } /** \internal */ template inline void evalTo(DestType& dst) const { Matrix workspace(rows()); evalTo(dst, workspace); } /** \internal */ template void evalTo(Dest& dst, Workspace& workspace) const { workspace.resize(rows()); Index vecs = m_length; if( internal::is_same::type,Dest>::value && internal::extract_data(dst) == internal::extract_data(m_vectors)) { // in-place dst.diagonal().setOnes(); dst.template triangularView().setZero(); for(Index k = vecs-1; k >= 0; --k) { Index cornerSize = rows() - k - m_shift; if(m_trans) dst.bottomRightCorner(cornerSize, cornerSize) .applyHouseholderOnTheRight(essentialVector(k), m_coeffs.coeff(k), workspace.data()); else dst.bottomRightCorner(cornerSize, cornerSize) .applyHouseholderOnTheLeft(essentialVector(k), m_coeffs.coeff(k), workspace.data()); // clear the off diagonal vector dst.col(k).tail(rows()-k-1).setZero(); } // clear the remaining columns if needed for(Index k = 0; k= 0; --k) { Index cornerSize = rows() - k - m_shift; if(m_trans) dst.bottomRightCorner(cornerSize, cornerSize) .applyHouseholderOnTheRight(essentialVector(k), m_coeffs.coeff(k), &workspace.coeffRef(0)); else dst.bottomRightCorner(cornerSize, cornerSize) .applyHouseholderOnTheLeft(essentialVector(k), m_coeffs.coeff(k), &workspace.coeffRef(0)); } } } /** \internal */ template inline void applyThisOnTheRight(Dest& dst) const { Matrix workspace(dst.rows()); applyThisOnTheRight(dst, workspace); } /** \internal */ template inline void applyThisOnTheRight(Dest& dst, Workspace& workspace) const { workspace.resize(dst.rows()); for(Index k = 0; k < m_length; ++k) { Index actual_k = m_trans ? 
m_length-k-1 : k; dst.rightCols(rows()-m_shift-actual_k) .applyHouseholderOnTheRight(essentialVector(actual_k), m_coeffs.coeff(actual_k), workspace.data()); } } /** \internal */ template inline void applyThisOnTheLeft(Dest& dst) const { Matrix workspace(dst.cols()); applyThisOnTheLeft(dst, workspace); } /** \internal */ template inline void applyThisOnTheLeft(Dest& dst, Workspace& workspace) const { workspace.resize(dst.cols()); for(Index k = 0; k < m_length; ++k) { Index actual_k = m_trans ? k : m_length-k-1; dst.bottomRows(rows()-m_shift-actual_k) .applyHouseholderOnTheLeft(essentialVector(actual_k), m_coeffs.coeff(actual_k), workspace.data()); } } /** \brief Computes the product of a Householder sequence with a matrix. * \param[in] other %Matrix being multiplied. * \returns Expression object representing the product. * * This function computes \f$ HM \f$ where \f$ H \f$ is the Householder sequence represented by \p *this * and \f$ M \f$ is the matrix \p other. */ template typename internal::matrix_type_times_scalar_type::Type operator*(const MatrixBase& other) const { typename internal::matrix_type_times_scalar_type::Type res(other.template cast::ResultScalar>()); applyThisOnTheLeft(res); return res; } template friend struct internal::hseq_side_dependent_impl; /** \brief Sets the length of the Householder sequence. * \param [in] length New value for the length. * * By default, the length \f$ n \f$ of the Householder sequence \f$ H = H_0 H_1 \ldots H_{n-1} \f$ is set * to the number of columns of the matrix \p v passed to the constructor, or the number of rows if that * is smaller. After this function is called, the length equals \p length. * * \sa length() */ HouseholderSequence& setLength(Index length) { m_length = length; return *this; } /** \brief Sets the shift of the Householder sequence. * \param [in] shift New value for the shift. * * By default, a %HouseholderSequence object represents \f$ H = H_0 H_1 \ldots H_{n-1} \f$ and the i-th * column of the matrix \p v passed to the constructor corresponds to the i-th Householder * reflection. After this function is called, the object represents \f$ H = H_{\mathrm{shift}} * H_{\mathrm{shift}+1} \ldots H_{n-1} \f$ and the i-th column of \p v corresponds to the (shift+i)-th * Householder reflection. * * \sa shift() */ HouseholderSequence& setShift(Index shift) { m_shift = shift; return *this; } Index length() const { return m_length; } /**< \brief Returns the length of the Householder sequence. */ Index shift() const { return m_shift; } /**< \brief Returns the shift of the Householder sequence. */ /* Necessary for .adjoint() and .conjugate() */ template friend class HouseholderSequence; protected: /** \brief Sets the transpose flag. * \param [in] trans New value of the transpose flag. * * By default, the transpose flag is not set. If the transpose flag is set, then this object represents * \f$ H^T = H_{n-1}^T \ldots H_1^T H_0^T \f$ instead of \f$ H = H_0 H_1 \ldots H_{n-1} \f$. * * \sa trans() */ HouseholderSequence& setTrans(bool trans) { m_trans = trans; return *this; } bool trans() const { return m_trans; } /**< \brief Returns the transpose flag. */ typename VectorsType::Nested m_vectors; typename CoeffsType::Nested m_coeffs; bool m_trans; Index m_length; Index m_shift; }; /** \brief Computes the product of a matrix with a Householder sequence. * \param[in] other %Matrix being multiplied. * \param[in] h %HouseholderSequence being multiplied. * \returns Expression object representing the product. 
* * This function computes \f$ MH \f$ where \f$ M \f$ is the matrix \p other and \f$ H \f$ is the * Householder sequence represented by \p h. */ template typename internal::matrix_type_times_scalar_type::Type operator*(const MatrixBase& other, const HouseholderSequence& h) { typename internal::matrix_type_times_scalar_type::Type res(other.template cast::ResultScalar>()); h.applyThisOnTheRight(res); return res; } /** \ingroup Householder_Module \householder_module * \brief Convenience function for constructing a Householder sequence. * \returns A HouseholderSequence constructed from the specified arguments. */ template HouseholderSequence householderSequence(const VectorsType& v, const CoeffsType& h) { return HouseholderSequence(v, h); } /** \ingroup Householder_Module \householder_module * \brief Convenience function for constructing a Householder sequence. * \returns A HouseholderSequence constructed from the specified arguments. * \details This function differs from householderSequence() in that the template argument \p OnTheSide of * the constructed HouseholderSequence is set to OnTheRight, instead of the default OnTheLeft. */ template HouseholderSequence rightHouseholderSequence(const VectorsType& v, const CoeffsType& h) { return HouseholderSequence(v, h); } } // end namespace Eigen #endif // EIGEN_HOUSEHOLDER_SEQUENCE_H RcppEigen/inst/include/Eigen/src/Householder/BlockHouseholder.h0000644000176200001440000000527312375356050024247 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010 Vincent Lejeune // Copyright (C) 2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
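// An illustrative sketch (not part of the Eigen sources) of how a HouseholderSequence
// is typically obtained and applied, following the usage listed in the class
// documentation above. Eigen on the include path is assumed.
#include <Eigen/Dense>

inline void householder_sequence_demo() {
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 3);
  Eigen::HouseholderQR<Eigen::MatrixXd> qr(A);

  // Convert the implicit product of reflections to a dense orthogonal matrix.
  Eigen::MatrixXd Q = qr.householderQ();

  // Apply the adjoint sequence to A without ever forming Q explicitly.
  Eigen::MatrixXd QtA = qr.householderQ().adjoint() * A;

  // For this real-valued example, the same sequence can be rebuilt directly from
  // the packed Householder vectors and coefficients via householderSequence().
  Eigen::MatrixXd Q2 = Eigen::householderSequence(qr.matrixQR(), qr.hCoeffs());
}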
#ifndef EIGEN_BLOCK_HOUSEHOLDER_H #define EIGEN_BLOCK_HOUSEHOLDER_H // This file contains some helper function to deal with block householder reflectors namespace Eigen { namespace internal { /** \internal */ template void make_block_householder_triangular_factor(TriangularFactorType& triFactor, const VectorsType& vectors, const CoeffsType& hCoeffs) { typedef typename TriangularFactorType::Index Index; typedef typename VectorsType::Scalar Scalar; const Index nbVecs = vectors.cols(); eigen_assert(triFactor.rows() == nbVecs && triFactor.cols() == nbVecs && vectors.rows()>=nbVecs); for(Index i = 0; i < nbVecs; i++) { Index rs = vectors.rows() - i; Scalar Vii = vectors(i,i); vectors.const_cast_derived().coeffRef(i,i) = Scalar(1); triFactor.col(i).head(i).noalias() = -hCoeffs(i) * vectors.block(i, 0, rs, i).adjoint() * vectors.col(i).tail(rs); vectors.const_cast_derived().coeffRef(i, i) = Vii; // FIXME add .noalias() once the triangular product can work inplace triFactor.col(i).head(i) = triFactor.block(0,0,i,i).template triangularView() * triFactor.col(i).head(i); triFactor(i,i) = hCoeffs(i); } } /** \internal */ template void apply_block_householder_on_the_left(MatrixType& mat, const VectorsType& vectors, const CoeffsType& hCoeffs) { typedef typename MatrixType::Index Index; enum { TFactorSize = MatrixType::ColsAtCompileTime }; Index nbVecs = vectors.cols(); Matrix T(nbVecs,nbVecs); make_block_householder_triangular_factor(T, vectors, hCoeffs); const TriangularView& V(vectors); // A -= V T V^* A Matrix tmp = V.adjoint() * mat; // FIXME add .noalias() once the triangular product can work inplace tmp = T.template triangularView().adjoint() * tmp; mat.noalias() -= V * tmp; } } // end namespace internal } // end namespace Eigen #endif // EIGEN_BLOCK_HOUSEHOLDER_H RcppEigen/inst/include/Eigen/src/Householder/Householder.h0000644000176200001440000001217612253717461023276 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010 Benoit Jacob // Copyright (C) 2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_HOUSEHOLDER_H #define EIGEN_HOUSEHOLDER_H namespace Eigen { namespace internal { template struct decrement_size { enum { ret = n==Dynamic ? n : n-1 }; }; } /** Computes the elementary reflector H such that: * \f$ H *this = [ beta 0 ... 0]^T \f$ * where the transformation H is: * \f$ H = I - tau v v^*\f$ * and the vector v is: * \f$ v^T = [1 essential^T] \f$ * * The essential part of the vector \c v is stored in *this. * * On output: * \param tau the scaling factor of the Householder transformation * \param beta the result of H * \c *this * * \sa MatrixBase::makeHouseholder(), MatrixBase::applyHouseholderOnTheLeft(), * MatrixBase::applyHouseholderOnTheRight() */ template void MatrixBase::makeHouseholderInPlace(Scalar& tau, RealScalar& beta) { VectorBlock::ret> essentialPart(derived(), 1, size()-1); makeHouseholder(essentialPart, tau, beta); } /** Computes the elementary reflector H such that: * \f$ H *this = [ beta 0 ... 
0]^T \f$ * where the transformation H is: * \f$ H = I - tau v v^*\f$ * and the vector v is: * \f$ v^T = [1 essential^T] \f$ * * On output: * \param essential the essential part of the vector \c v * \param tau the scaling factor of the Householder transformation * \param beta the result of H * \c *this * * \sa MatrixBase::makeHouseholderInPlace(), MatrixBase::applyHouseholderOnTheLeft(), * MatrixBase::applyHouseholderOnTheRight() */ template template void MatrixBase::makeHouseholder( EssentialPart& essential, Scalar& tau, RealScalar& beta) const { using std::sqrt; using numext::conj; EIGEN_STATIC_ASSERT_VECTOR_ONLY(EssentialPart) VectorBlock tail(derived(), 1, size()-1); RealScalar tailSqNorm = size()==1 ? RealScalar(0) : tail.squaredNorm(); Scalar c0 = coeff(0); if(tailSqNorm == RealScalar(0) && numext::imag(c0)==RealScalar(0)) { tau = RealScalar(0); beta = numext::real(c0); essential.setZero(); } else { beta = sqrt(numext::abs2(c0) + tailSqNorm); if (numext::real(c0)>=RealScalar(0)) beta = -beta; essential = tail / (c0 - beta); tau = conj((beta - c0) / beta); } } /** Apply the elementary reflector H given by * \f$ H = I - tau v v^*\f$ * with * \f$ v^T = [1 essential^T] \f$ * from the left to a vector or matrix. * * On input: * \param essential the essential part of the vector \c v * \param tau the scaling factor of the Householder transformation * \param workspace a pointer to working space with at least * this->cols() * essential.size() entries * * \sa MatrixBase::makeHouseholder(), MatrixBase::makeHouseholderInPlace(), * MatrixBase::applyHouseholderOnTheRight() */ template template void MatrixBase::applyHouseholderOnTheLeft( const EssentialPart& essential, const Scalar& tau, Scalar* workspace) { if(rows() == 1) { *this *= Scalar(1)-tau; } else { Map::type> tmp(workspace,cols()); Block bottom(derived(), 1, 0, rows()-1, cols()); tmp.noalias() = essential.adjoint() * bottom; tmp += this->row(0); this->row(0) -= tau * tmp; bottom.noalias() -= tau * essential * tmp; } } /** Apply the elementary reflector H given by * \f$ H = I - tau v v^*\f$ * with * \f$ v^T = [1 essential^T] \f$ * from the right to a vector or matrix. * * On input: * \param essential the essential part of the vector \c v * \param tau the scaling factor of the Householder transformation * \param workspace a pointer to working space with at least * this->cols() * essential.size() entries * * \sa MatrixBase::makeHouseholder(), MatrixBase::makeHouseholderInPlace(), * MatrixBase::applyHouseholderOnTheLeft() */ template template void MatrixBase::applyHouseholderOnTheRight( const EssentialPart& essential, const Scalar& tau, Scalar* workspace) { if(cols() == 1) { *this *= Scalar(1)-tau; } else { Map::type> tmp(workspace,rows()); Block right(derived(), 0, 1, rows(), cols()-1); tmp.noalias() = right * essential.conjugate(); tmp += this->col(0); this->col(0) -= tau * tmp; right.noalias() -= tau * tmp * essential.transpose(); } } } // end namespace Eigen #endif // EIGEN_HOUSEHOLDER_H RcppEigen/inst/include/Eigen/src/Core/0000755000176200001440000000000012647570720017246 5ustar liggesusersRcppEigen/inst/include/Eigen/src/Core/Array.h0000644000176200001440000002733412647570720020506 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
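// An illustrative sketch (not part of the Eigen sources) of the elementary reflector
// routines documented in Householder.h above: the reflector H is built so that H * x
// has every entry below the first one annihilated. Eigen on the include path is assumed.
#include <Eigen/Dense>
#include <vector>

inline void householder_reflector_demo() {
  Eigen::VectorXd x = Eigen::VectorXd::Random(5);
  Eigen::VectorXd v = x;                // makeHouseholderInPlace overwrites the tail of v
  double tau, beta;
  v.makeHouseholderInPlace(tau, beta);  // essential part of the reflector now in v.tail(4)

  Eigen::MatrixXd A(5, 2);
  A.col(0) = x;
  A.col(1) = Eigen::VectorXd::Random(5);

  // Workspace sized as documented above: cols() * essential.size() entries.
  std::vector<double> workspace(A.cols() * 4);
  A.applyHouseholderOnTheLeft(v.tail(4), tau, workspace.data());
  // A.col(0) is now approximately (beta, 0, 0, 0, 0)^T.
}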
#ifndef EIGEN_ARRAY_H #define EIGEN_ARRAY_H namespace Eigen { /** \class Array * \ingroup Core_Module * * \brief General-purpose arrays with easy API for coefficient-wise operations * * The %Array class is very similar to the Matrix class. It provides * general-purpose one- and two-dimensional arrays. The difference between the * %Array and the %Matrix class is primarily in the API: the API for the * %Array class provides easy access to coefficient-wise operations, while the * API for the %Matrix class provides easy access to linear-algebra * operations. * * This class can be extended with the help of the plugin mechanism described on the page * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_ARRAY_PLUGIN. * * \sa \ref TutorialArrayClass, \ref TopicClassHierarchy */ namespace internal { template struct traits > : traits > { typedef ArrayXpr XprKind; typedef ArrayBase > XprBase; }; } template class Array : public PlainObjectBase > { public: typedef PlainObjectBase Base; EIGEN_DENSE_PUBLIC_INTERFACE(Array) enum { Options = _Options }; typedef typename Base::PlainObject PlainObject; protected: template friend struct internal::conservative_resize_like_impl; using Base::m_storage; public: using Base::base; using Base::coeff; using Base::coeffRef; /** * The usage of * using Base::operator=; * fails on MSVC. Since the code below is working with GCC and MSVC, we skipped * the usage of 'using'. This should be done only for operator=. */ template EIGEN_STRONG_INLINE Array& operator=(const EigenBase &other) { return Base::operator=(other); } /** Copies the value of the expression \a other into \c *this with automatic resizing. * * *this might be resized to match the dimensions of \a other. If *this was a null matrix (not already initialized), * it will be initialized. * * Note that copying a row-vector into a vector (and conversely) is allowed. * The resizing, if any, is then done in the appropriate way so that row-vectors * remain row-vectors and vectors remain vectors. */ template EIGEN_STRONG_INLINE Array& operator=(const ArrayBase& other) { return Base::_set(other); } /** This is a special case of the templated operator=. Its purpose is to * prevent a default operator= from hiding the templated operator=. */ EIGEN_STRONG_INLINE Array& operator=(const Array& other) { return Base::_set(other); } /** Default constructor. * * For fixed-size matrices, does nothing. * * For dynamic-size matrices, creates an empty matrix of size 0. Does not allocate any array. Such a matrix * is called a null matrix. This constructor is the unique way to create null matrices: resizing * a matrix to 0 is not supported. * * \sa resize(Index,Index) */ EIGEN_STRONG_INLINE Array() : Base() { Base::_check_template_params(); EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED } #ifndef EIGEN_PARSED_BY_DOXYGEN // FIXME is it still needed ?? /** \internal */ Array(internal::constructor_without_unaligned_array_assert) : Base(internal::constructor_without_unaligned_array_assert()) { Base::_check_template_params(); EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED } #endif #ifdef EIGEN_HAVE_RVALUE_REFERENCES Array(Array&& other) : Base(std::move(other)) { Base::_check_template_params(); if (RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic) Base::_set_noalias(other); } Array& operator=(Array&& other) { other.swap(*this); return *this; } #endif /** Constructs a vector or row-vector with given dimension. \only_for_vectors * * Note that this is only useful for dynamic-size vectors. 
For fixed-size vectors, * it is redundant to pass the dimension here, so it makes more sense to use the default * constructor Matrix() instead. */ EIGEN_STRONG_INLINE explicit Array(Index dim) : Base(dim, RowsAtCompileTime == 1 ? 1 : dim, ColsAtCompileTime == 1 ? 1 : dim) { Base::_check_template_params(); EIGEN_STATIC_ASSERT_VECTOR_ONLY(Array) eigen_assert(dim >= 0); eigen_assert(SizeAtCompileTime == Dynamic || SizeAtCompileTime == dim); EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED } #ifndef EIGEN_PARSED_BY_DOXYGEN template EIGEN_STRONG_INLINE Array(const T0& val0, const T1& val1) { Base::_check_template_params(); this->template _init2(val0, val1); } #else /** constructs an uninitialized matrix with \a rows rows and \a cols columns. * * This is useful for dynamic-size matrices. For fixed-size matrices, * it is redundant to pass these parameters, so one should use the default constructor * Matrix() instead. */ Array(Index rows, Index cols); /** constructs an initialized 2D vector with given coefficients */ Array(const Scalar& val0, const Scalar& val1); #endif /** constructs an initialized 3D vector with given coefficients */ EIGEN_STRONG_INLINE Array(const Scalar& val0, const Scalar& val1, const Scalar& val2) { Base::_check_template_params(); EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Array, 3) m_storage.data()[0] = val0; m_storage.data()[1] = val1; m_storage.data()[2] = val2; } /** constructs an initialized 4D vector with given coefficients */ EIGEN_STRONG_INLINE Array(const Scalar& val0, const Scalar& val1, const Scalar& val2, const Scalar& val3) { Base::_check_template_params(); EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Array, 4) m_storage.data()[0] = val0; m_storage.data()[1] = val1; m_storage.data()[2] = val2; m_storage.data()[3] = val3; } explicit Array(const Scalar *data); /** Constructor copying the value of the expression \a other */ template EIGEN_STRONG_INLINE Array(const ArrayBase& other) : Base(other.rows() * other.cols(), other.rows(), other.cols()) { Base::_check_template_params(); Base::_set_noalias(other); } /** Copy constructor */ EIGEN_STRONG_INLINE Array(const Array& other) : Base(other.rows() * other.cols(), other.rows(), other.cols()) { Base::_check_template_params(); Base::_set_noalias(other); } /** Copy constructor with in-place evaluation */ template EIGEN_STRONG_INLINE Array(const ReturnByValue& other) { Base::_check_template_params(); Base::resize(other.rows(), other.cols()); other.evalTo(*this); } /** \sa MatrixBase::operator=(const EigenBase&) */ template EIGEN_STRONG_INLINE Array(const EigenBase &other) : Base(other.derived().rows() * other.derived().cols(), other.derived().rows(), other.derived().cols()) { Base::_check_template_params(); Base::_resize_to_match(other); *this = other; } /** Override MatrixBase::swap() since for dynamic-sized matrices of same type it is enough to swap the * data pointers. */ template void swap(ArrayBase const & other) { this->_swap(other.derived()); } inline Index innerStride() const { return 1; } inline Index outerStride() const { return this->innerSize(); } #ifdef EIGEN_ARRAY_PLUGIN #include EIGEN_ARRAY_PLUGIN #endif private: template friend struct internal::matrix_swap_impl; }; /** \defgroup arraytypedefs Global array typedefs * \ingroup Core_Module * * Eigen defines several typedef shortcuts for most common 1D and 2D array types. 
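 *
 * For instance (an illustrative sketch, not part of the shipped documentation), these
 * array types expose the coefficient-wise API of the Array class documented above, with
 * .matrix() switching back to linear-algebra semantics:
 * \code
 * Eigen::ArrayXXd a(2, 2), b(2, 2);
 * a << 1, 2, 3, 4;
 * b << 5, 6, 7, 8;
 * Eigen::ArrayXXd cwise = a * b;                   // coefficient-wise product
 * Eigen::MatrixXd mat   = a.matrix() * b.matrix(); // matrix product
 * Eigen::ArrayXXd back  = mat.array() + 1.0;       // back to array semantics
 * \endcode
 *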
* * The general patterns are the following: * * \c ArrayRowsColsType where \c Rows and \c Cols can be \c 2,\c 3,\c 4 for fixed size square matrices or \c X for dynamic size, * and where \c Type can be \c i for integer, \c f for float, \c d for double, \c cf for complex float, \c cd * for complex double. * * For example, \c Array33d is a fixed-size 3x3 array type of doubles, and \c ArrayXXf is a dynamic-size matrix of floats. * * There are also \c ArraySizeType which are self-explanatory. For example, \c Array4cf is * a fixed-size 1D array of 4 complex floats. * * \sa class Array */ #define EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix) \ /** \ingroup arraytypedefs */ \ typedef Array Array##SizeSuffix##SizeSuffix##TypeSuffix; \ /** \ingroup arraytypedefs */ \ typedef Array Array##SizeSuffix##TypeSuffix; #define EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, Size) \ /** \ingroup arraytypedefs */ \ typedef Array Array##Size##X##TypeSuffix; \ /** \ingroup arraytypedefs */ \ typedef Array Array##X##Size##TypeSuffix; #define EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \ EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 2, 2) \ EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 3, 3) \ EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 4, 4) \ EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, Dynamic, X) \ EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 2) \ EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 3) \ EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 4) EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(int, i) EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(float, f) EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(double, d) EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(std::complex, cf) EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(std::complex, cd) #undef EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES #undef EIGEN_MAKE_ARRAY_TYPEDEFS #undef EIGEN_MAKE_ARRAY_TYPEDEFS_LARGE #define EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, SizeSuffix) \ using Eigen::Matrix##SizeSuffix##TypeSuffix; \ using Eigen::Vector##SizeSuffix##TypeSuffix; \ using Eigen::RowVector##SizeSuffix##TypeSuffix; #define EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(TypeSuffix) \ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 2) \ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 3) \ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 4) \ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, X) \ #define EIGEN_USING_ARRAY_TYPEDEFS \ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(i) \ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(f) \ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(d) \ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(cf) \ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(cd) } // end namespace Eigen #endif // EIGEN_ARRAY_H RcppEigen/inst/include/Eigen/src/Core/PlainObjectBase.h0000644000176200001440000010601412647570720022406 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud // Copyright (C) 2006-2008 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
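// An illustrative sketch (not part of the Eigen sources) of the array typedef naming
// scheme documented in Array.h above. Eigen on the include path is assumed.
#include <Eigen/Dense>
#include <complex>

inline void array_typedef_demo() {
  Eigen::Array33d fixed33 = Eigen::Array33d::Zero();        // fixed-size 3x3 array of double
  Eigen::ArrayXXf dynamic2d = Eigen::ArrayXXf::Random(2, 5); // dynamic-size 2D array of float
  Eigen::Array4cf complex4;                                  // fixed-size 1D array of 4 complex<float>
  complex4.setZero();
}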
#ifndef EIGEN_DENSESTORAGEBASE_H #define EIGEN_DENSESTORAGEBASE_H #if defined(EIGEN_INITIALIZE_MATRICES_BY_ZERO) # define EIGEN_INITIALIZE_COEFFS # define EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED for(int i=0;i::quiet_NaN(); #else # undef EIGEN_INITIALIZE_COEFFS # define EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED #endif namespace Eigen { namespace internal { template struct check_rows_cols_for_overflow { template static EIGEN_ALWAYS_INLINE void run(Index, Index) { } }; template<> struct check_rows_cols_for_overflow { template static EIGEN_ALWAYS_INLINE void run(Index rows, Index cols) { // http://hg.mozilla.org/mozilla-central/file/6c8a909977d3/xpcom/ds/CheckedInt.h#l242 // we assume Index is signed Index max_index = (size_t(1) << (8 * sizeof(Index) - 1)) - 1; // assume Index is signed bool error = (rows == 0 || cols == 0) ? false : (rows > max_index / cols); if (error) throw_std_bad_alloc(); } }; template struct conservative_resize_like_impl; template struct matrix_swap_impl; } // end namespace internal /** \class PlainObjectBase * \brief %Dense storage base class for matrices and arrays. * * This class can be extended with the help of the plugin mechanism described on the page * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_PLAINOBJECTBASE_PLUGIN. * * \sa \ref TopicClassHierarchy */ #ifdef EIGEN_PARSED_BY_DOXYGEN namespace internal { // this is a warkaround to doxygen not being able to understand the inheritence logic // when it is hidden by the dense_xpr_base helper struct. template struct dense_xpr_base_dispatcher_for_doxygen;// : public MatrixBase {}; /** This class is just a workaround for Doxygen and it does not not actually exist. */ template struct dense_xpr_base_dispatcher_for_doxygen > : public MatrixBase > {}; /** This class is just a workaround for Doxygen and it does not not actually exist. 
*/ template struct dense_xpr_base_dispatcher_for_doxygen > : public ArrayBase > {}; } // namespace internal template class PlainObjectBase : public internal::dense_xpr_base_dispatcher_for_doxygen #else template class PlainObjectBase : public internal::dense_xpr_base::type #endif { public: enum { Options = internal::traits::Options }; typedef typename internal::dense_xpr_base::type Base; typedef typename internal::traits::StorageKind StorageKind; typedef typename internal::traits::Index Index; typedef typename internal::traits::Scalar Scalar; typedef typename internal::packet_traits::type PacketScalar; typedef typename NumTraits::Real RealScalar; typedef Derived DenseType; using Base::RowsAtCompileTime; using Base::ColsAtCompileTime; using Base::SizeAtCompileTime; using Base::MaxRowsAtCompileTime; using Base::MaxColsAtCompileTime; using Base::MaxSizeAtCompileTime; using Base::IsVectorAtCompileTime; using Base::Flags; template friend class Eigen::Map; friend class Eigen::Map; typedef Eigen::Map MapType; friend class Eigen::Map; typedef const Eigen::Map ConstMapType; friend class Eigen::Map; typedef Eigen::Map AlignedMapType; friend class Eigen::Map; typedef const Eigen::Map ConstAlignedMapType; template struct StridedMapType { typedef Eigen::Map type; }; template struct StridedConstMapType { typedef Eigen::Map type; }; template struct StridedAlignedMapType { typedef Eigen::Map type; }; template struct StridedConstAlignedMapType { typedef Eigen::Map type; }; protected: DenseStorage m_storage; public: enum { NeedsToAlign = SizeAtCompileTime != Dynamic && (internal::traits::Flags & AlignedBit) != 0 }; EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) Base& base() { return *static_cast(this); } const Base& base() const { return *static_cast(this); } EIGEN_STRONG_INLINE Index rows() const { return m_storage.rows(); } EIGEN_STRONG_INLINE Index cols() const { return m_storage.cols(); } EIGEN_STRONG_INLINE const Scalar& coeff(Index rowId, Index colId) const { if(Flags & RowMajorBit) return m_storage.data()[colId + rowId * m_storage.cols()]; else // column-major return m_storage.data()[rowId + colId * m_storage.rows()]; } EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const { return m_storage.data()[index]; } EIGEN_STRONG_INLINE Scalar& coeffRef(Index rowId, Index colId) { if(Flags & RowMajorBit) return m_storage.data()[colId + rowId * m_storage.cols()]; else // column-major return m_storage.data()[rowId + colId * m_storage.rows()]; } EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { return m_storage.data()[index]; } EIGEN_STRONG_INLINE const Scalar& coeffRef(Index rowId, Index colId) const { if(Flags & RowMajorBit) return m_storage.data()[colId + rowId * m_storage.cols()]; else // column-major return m_storage.data()[rowId + colId * m_storage.rows()]; } EIGEN_STRONG_INLINE const Scalar& coeffRef(Index index) const { return m_storage.data()[index]; } /** \internal */ template EIGEN_STRONG_INLINE PacketScalar packet(Index rowId, Index colId) const { return internal::ploadt (m_storage.data() + (Flags & RowMajorBit ? colId + rowId * m_storage.cols() : rowId + colId * m_storage.rows())); } /** \internal */ template EIGEN_STRONG_INLINE PacketScalar packet(Index index) const { return internal::ploadt(m_storage.data() + index); } /** \internal */ template EIGEN_STRONG_INLINE void writePacket(Index rowId, Index colId, const PacketScalar& val) { internal::pstoret (m_storage.data() + (Flags & RowMajorBit ? 
colId + rowId * m_storage.cols() : rowId + colId * m_storage.rows()), val); } /** \internal */ template EIGEN_STRONG_INLINE void writePacket(Index index, const PacketScalar& val) { internal::pstoret(m_storage.data() + index, val); } /** \returns a const pointer to the data array of this matrix */ EIGEN_STRONG_INLINE const Scalar *data() const { return m_storage.data(); } /** \returns a pointer to the data array of this matrix */ EIGEN_STRONG_INLINE Scalar *data() { return m_storage.data(); } /** Resizes \c *this to a \a rows x \a cols matrix. * * This method is intended for dynamic-size matrices, although it is legal to call it on any * matrix as long as fixed dimensions are left unchanged. If you only want to change the number * of rows and/or of columns, you can use resize(NoChange_t, Index), resize(Index, NoChange_t). * * If the current number of coefficients of \c *this exactly matches the * product \a rows * \a cols, then no memory allocation is performed and * the current values are left unchanged. In all other cases, including * shrinking, the data is reallocated and all previous values are lost. * * Example: \include Matrix_resize_int_int.cpp * Output: \verbinclude Matrix_resize_int_int.out * * \sa resize(Index) for vectors, resize(NoChange_t, Index), resize(Index, NoChange_t) */ EIGEN_STRONG_INLINE void resize(Index nbRows, Index nbCols) { eigen_assert( EIGEN_IMPLIES(RowsAtCompileTime!=Dynamic,nbRows==RowsAtCompileTime) && EIGEN_IMPLIES(ColsAtCompileTime!=Dynamic,nbCols==ColsAtCompileTime) && EIGEN_IMPLIES(RowsAtCompileTime==Dynamic && MaxRowsAtCompileTime!=Dynamic,nbRows<=MaxRowsAtCompileTime) && EIGEN_IMPLIES(ColsAtCompileTime==Dynamic && MaxColsAtCompileTime!=Dynamic,nbCols<=MaxColsAtCompileTime) && nbRows>=0 && nbCols>=0 && "Invalid sizes when resizing a matrix or array."); internal::check_rows_cols_for_overflow::run(nbRows, nbCols); #ifdef EIGEN_INITIALIZE_COEFFS Index size = nbRows*nbCols; bool size_changed = size != this->size(); m_storage.resize(size, nbRows, nbCols); if(size_changed) EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED #else internal::check_rows_cols_for_overflow::run(nbRows, nbCols); m_storage.resize(nbRows*nbCols, nbRows, nbCols); #endif } /** Resizes \c *this to a vector of length \a size * * \only_for_vectors. This method does not work for * partially dynamic matrices when the static dimension is anything other * than 1. For example it will not work with Matrix. * * Example: \include Matrix_resize_int.cpp * Output: \verbinclude Matrix_resize_int.out * * \sa resize(Index,Index), resize(NoChange_t, Index), resize(Index, NoChange_t) */ inline void resize(Index size) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(PlainObjectBase) eigen_assert(((SizeAtCompileTime == Dynamic && (MaxSizeAtCompileTime==Dynamic || size<=MaxSizeAtCompileTime)) || SizeAtCompileTime == size) && size>=0); #ifdef EIGEN_INITIALIZE_COEFFS bool size_changed = size != this->size(); #endif if(RowsAtCompileTime == 1) m_storage.resize(size, 1, size); else m_storage.resize(size, size, 1); #ifdef EIGEN_INITIALIZE_COEFFS if(size_changed) EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED #endif } /** Resizes the matrix, changing only the number of columns. For the parameter of type NoChange_t, just pass the special value \c NoChange * as in the example below. 
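// --- Illustrative sketch (editorial addition, not part of the Eigen header above). ---
// As documented above, resize() keeps the existing coefficients only when the total
// size rows*cols is unchanged; any other resize reallocates and the old values are
// lost. Hypothetical standalone example:
#include <Eigen/Dense>
#include <iostream>

int main() {
    Eigen::MatrixXd m = Eigen::MatrixXd::Constant(2, 3, 1.0);
    m.resize(3, 2);        // same total size (6): no reallocation, values kept
    std::cout << m << "\n\n";
    m.resize(4, 4);        // different size: reallocated, contents now unspecified
    m.setZero();           // so initialize explicitly before use
    std::cout << m << "\n";
    return 0;
}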
* * Example: \include Matrix_resize_NoChange_int.cpp * Output: \verbinclude Matrix_resize_NoChange_int.out * * \sa resize(Index,Index) */ inline void resize(NoChange_t, Index nbCols) { resize(rows(), nbCols); } /** Resizes the matrix, changing only the number of rows. For the parameter of type NoChange_t, just pass the special value \c NoChange * as in the example below. * * Example: \include Matrix_resize_int_NoChange.cpp * Output: \verbinclude Matrix_resize_int_NoChange.out * * \sa resize(Index,Index) */ inline void resize(Index nbRows, NoChange_t) { resize(nbRows, cols()); } /** Resizes \c *this to have the same dimensions as \a other. * Takes care of doing all the checking that's needed. * * Note that copying a row-vector into a vector (and conversely) is allowed. * The resizing, if any, is then done in the appropriate way so that row-vectors * remain row-vectors and vectors remain vectors. */ template EIGEN_STRONG_INLINE void resizeLike(const EigenBase& _other) { const OtherDerived& other = _other.derived(); internal::check_rows_cols_for_overflow::run(other.rows(), other.cols()); const Index othersize = other.rows()*other.cols(); if(RowsAtCompileTime == 1) { eigen_assert(other.rows() == 1 || other.cols() == 1); resize(1, othersize); } else if(ColsAtCompileTime == 1) { eigen_assert(other.rows() == 1 || other.cols() == 1); resize(othersize, 1); } else resize(other.rows(), other.cols()); } /** Resizes the matrix to \a rows x \a cols while leaving old values untouched. * * The method is intended for matrices of dynamic size. If you only want to change the number * of rows and/or of columns, you can use conservativeResize(NoChange_t, Index) or * conservativeResize(Index, NoChange_t). * * Matrices are resized relative to the top-left element. In case values need to be * appended to the matrix they will be uninitialized. */ EIGEN_STRONG_INLINE void conservativeResize(Index nbRows, Index nbCols) { internal::conservative_resize_like_impl::run(*this, nbRows, nbCols); } /** Resizes the matrix to \a rows x \a cols while leaving old values untouched. * * As opposed to conservativeResize(Index rows, Index cols), this version leaves * the number of columns unchanged. * * In case the matrix is growing, new rows will be uninitialized. */ EIGEN_STRONG_INLINE void conservativeResize(Index nbRows, NoChange_t) { // Note: see the comment in conservativeResize(Index,Index) conservativeResize(nbRows, cols()); } /** Resizes the matrix to \a rows x \a cols while leaving old values untouched. * * As opposed to conservativeResize(Index rows, Index cols), this version leaves * the number of rows unchanged. * * In case the matrix is growing, new columns will be uninitialized. */ EIGEN_STRONG_INLINE void conservativeResize(NoChange_t, Index nbCols) { // Note: see the comment in conservativeResize(Index,Index) conservativeResize(rows(), nbCols); } /** Resizes the vector to \a size while retaining old values. * * \only_for_vectors. This method does not work for * partially dynamic matrices when the static dimension is anything other * than 1. For example it will not work with Matrix. * * When values are appended, they will be uninitialized. */ EIGEN_STRONG_INLINE void conservativeResize(Index size) { internal::conservative_resize_like_impl::run(*this, size); } /** Resizes the matrix to \a rows x \a cols of \c other, while leaving old values untouched. * * The method is intended for matrices of dynamic size. 
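// --- Illustrative sketch (editorial addition, not part of the Eigen header above). ---
// Unlike resize(), the conservativeResize() overloads above keep the existing values
// (the top-left block); rows or columns that are appended stay uninitialized.
// Hypothetical standalone example:
#include <Eigen/Dense>
#include <iostream>

int main() {
    Eigen::MatrixXd m(2, 2);
    m << 1, 2,
         3, 4;
    m.conservativeResize(3, 3);   // original 2x2 block preserved
    m.row(2).setZero();           // appended entries are uninitialized: set them explicitly
    m.col(2).setZero();
    std::cout << m << "\n";
    return 0;
}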
If you only want to change the number * of rows and/or of columns, you can use conservativeResize(NoChange_t, Index) or * conservativeResize(Index, NoChange_t). * * Matrices are resized relative to the top-left element. In case values need to be * appended to the matrix they will copied from \c other. */ template EIGEN_STRONG_INLINE void conservativeResizeLike(const DenseBase& other) { internal::conservative_resize_like_impl::run(*this, other); } /** This is a special case of the templated operator=. Its purpose is to * prevent a default operator= from hiding the templated operator=. */ EIGEN_STRONG_INLINE Derived& operator=(const PlainObjectBase& other) { return _set(other); } /** \sa MatrixBase::lazyAssign() */ template EIGEN_STRONG_INLINE Derived& lazyAssign(const DenseBase& other) { _resize_to_match(other); return Base::lazyAssign(other.derived()); } template EIGEN_STRONG_INLINE Derived& operator=(const ReturnByValue& func) { resize(func.rows(), func.cols()); return Base::operator=(func); } EIGEN_STRONG_INLINE PlainObjectBase() : m_storage() { // _check_template_params(); // EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED } #ifndef EIGEN_PARSED_BY_DOXYGEN // FIXME is it still needed ? /** \internal */ PlainObjectBase(internal::constructor_without_unaligned_array_assert) : m_storage(internal::constructor_without_unaligned_array_assert()) { // _check_template_params(); EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED } #endif #ifdef EIGEN_HAVE_RVALUE_REFERENCES PlainObjectBase(PlainObjectBase&& other) : m_storage( std::move(other.m_storage) ) { } PlainObjectBase& operator=(PlainObjectBase&& other) { using std::swap; swap(m_storage, other.m_storage); return *this; } #endif /** Copy constructor */ EIGEN_STRONG_INLINE PlainObjectBase(const PlainObjectBase& other) : m_storage() { _check_template_params(); lazyAssign(other); } template EIGEN_STRONG_INLINE PlainObjectBase(const DenseBase &other) : m_storage() { _check_template_params(); lazyAssign(other); } EIGEN_STRONG_INLINE PlainObjectBase(Index a_size, Index nbRows, Index nbCols) : m_storage(a_size, nbRows, nbCols) { // _check_template_params(); // EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED } /** \copydoc MatrixBase::operator=(const EigenBase&) */ template EIGEN_STRONG_INLINE Derived& operator=(const EigenBase &other) { _resize_to_match(other); Base::operator=(other.derived()); return this->derived(); } /** \sa MatrixBase::operator=(const EigenBase&) */ template EIGEN_STRONG_INLINE PlainObjectBase(const EigenBase &other) : m_storage(other.derived().rows() * other.derived().cols(), other.derived().rows(), other.derived().cols()) { _check_template_params(); internal::check_rows_cols_for_overflow::run(other.derived().rows(), other.derived().cols()); Base::operator=(other.derived()); } /** \name Map * These are convenience functions returning Map objects. The Map() static functions return unaligned Map objects, * while the AlignedMap() functions return aligned Map objects and thus should be called only with 16-byte-aligned * \a data pointers. 
* * \see class Map */ //@{ static inline ConstMapType Map(const Scalar* data) { return ConstMapType(data); } static inline MapType Map(Scalar* data) { return MapType(data); } static inline ConstMapType Map(const Scalar* data, Index size) { return ConstMapType(data, size); } static inline MapType Map(Scalar* data, Index size) { return MapType(data, size); } static inline ConstMapType Map(const Scalar* data, Index rows, Index cols) { return ConstMapType(data, rows, cols); } static inline MapType Map(Scalar* data, Index rows, Index cols) { return MapType(data, rows, cols); } static inline ConstAlignedMapType MapAligned(const Scalar* data) { return ConstAlignedMapType(data); } static inline AlignedMapType MapAligned(Scalar* data) { return AlignedMapType(data); } static inline ConstAlignedMapType MapAligned(const Scalar* data, Index size) { return ConstAlignedMapType(data, size); } static inline AlignedMapType MapAligned(Scalar* data, Index size) { return AlignedMapType(data, size); } static inline ConstAlignedMapType MapAligned(const Scalar* data, Index rows, Index cols) { return ConstAlignedMapType(data, rows, cols); } static inline AlignedMapType MapAligned(Scalar* data, Index rows, Index cols) { return AlignedMapType(data, rows, cols); } template static inline typename StridedConstMapType >::type Map(const Scalar* data, const Stride& stride) { return typename StridedConstMapType >::type(data, stride); } template static inline typename StridedMapType >::type Map(Scalar* data, const Stride& stride) { return typename StridedMapType >::type(data, stride); } template static inline typename StridedConstMapType >::type Map(const Scalar* data, Index size, const Stride& stride) { return typename StridedConstMapType >::type(data, size, stride); } template static inline typename StridedMapType >::type Map(Scalar* data, Index size, const Stride& stride) { return typename StridedMapType >::type(data, size, stride); } template static inline typename StridedConstMapType >::type Map(const Scalar* data, Index rows, Index cols, const Stride& stride) { return typename StridedConstMapType >::type(data, rows, cols, stride); } template static inline typename StridedMapType >::type Map(Scalar* data, Index rows, Index cols, const Stride& stride) { return typename StridedMapType >::type(data, rows, cols, stride); } template static inline typename StridedConstAlignedMapType >::type MapAligned(const Scalar* data, const Stride& stride) { return typename StridedConstAlignedMapType >::type(data, stride); } template static inline typename StridedAlignedMapType >::type MapAligned(Scalar* data, const Stride& stride) { return typename StridedAlignedMapType >::type(data, stride); } template static inline typename StridedConstAlignedMapType >::type MapAligned(const Scalar* data, Index size, const Stride& stride) { return typename StridedConstAlignedMapType >::type(data, size, stride); } template static inline typename StridedAlignedMapType >::type MapAligned(Scalar* data, Index size, const Stride& stride) { return typename StridedAlignedMapType >::type(data, size, stride); } template static inline typename StridedConstAlignedMapType >::type MapAligned(const Scalar* data, Index rows, Index cols, const Stride& stride) { return typename StridedConstAlignedMapType >::type(data, rows, cols, stride); } template static inline typename StridedAlignedMapType >::type MapAligned(Scalar* data, Index rows, Index cols, const Stride& stride) { return typename StridedAlignedMapType >::type(data, rows, cols, stride); } //@} using 
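// --- Illustrative sketch (editorial addition, not part of the Eigen header above). ---
// The static Map()/MapAligned() helpers above wrap existing memory in an Eigen view
// without copying; MapAligned() additionally requires a suitably aligned pointer.
// Hypothetical standalone example over an ordinary C array:
#include <Eigen/Dense>
#include <iostream>

int main() {
    double buf[6] = {1, 2, 3, 4, 5, 6};
    Eigen::Map<Eigen::MatrixXd> view = Eigen::MatrixXd::Map(buf, 2, 3);  // 2x3, column-major
    view(0, 0) = 10.0;                      // writes through to buf[0]
    std::cout << view << "\nbuf[0] = " << buf[0] << "\n";
    return 0;
}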
Base::setConstant; Derived& setConstant(Index size, const Scalar& value); Derived& setConstant(Index rows, Index cols, const Scalar& value); using Base::setZero; Derived& setZero(Index size); Derived& setZero(Index rows, Index cols); using Base::setOnes; Derived& setOnes(Index size); Derived& setOnes(Index rows, Index cols); using Base::setRandom; Derived& setRandom(Index size); Derived& setRandom(Index rows, Index cols); #ifdef EIGEN_PLAINOBJECTBASE_PLUGIN #include EIGEN_PLAINOBJECTBASE_PLUGIN #endif protected: /** \internal Resizes *this in preparation for assigning \a other to it. * Takes care of doing all the checking that's needed. * * Note that copying a row-vector into a vector (and conversely) is allowed. * The resizing, if any, is then done in the appropriate way so that row-vectors * remain row-vectors and vectors remain vectors. */ template EIGEN_STRONG_INLINE void _resize_to_match(const EigenBase& other) { #ifdef EIGEN_NO_AUTOMATIC_RESIZING eigen_assert((this->size()==0 || (IsVectorAtCompileTime ? (this->size() == other.size()) : (rows() == other.rows() && cols() == other.cols()))) && "Size mismatch. Automatic resizing is disabled because EIGEN_NO_AUTOMATIC_RESIZING is defined"); EIGEN_ONLY_USED_FOR_DEBUG(other); if(this->size()==0) resizeLike(other); #else resizeLike(other); #endif } /** * \brief Copies the value of the expression \a other into \c *this with automatic resizing. * * *this might be resized to match the dimensions of \a other. If *this was a null matrix (not already initialized), * it will be initialized. * * Note that copying a row-vector into a vector (and conversely) is allowed. * The resizing, if any, is then done in the appropriate way so that row-vectors * remain row-vectors and vectors remain vectors. * * \sa operator=(const MatrixBase&), _set_noalias() * * \internal */ template EIGEN_STRONG_INLINE Derived& _set(const DenseBase& other) { _set_selector(other.derived(), typename internal::conditional(int(OtherDerived::Flags) & EvalBeforeAssigningBit), internal::true_type, internal::false_type>::type()); return this->derived(); } template EIGEN_STRONG_INLINE void _set_selector(const OtherDerived& other, const internal::true_type&) { _set_noalias(other.eval()); } template EIGEN_STRONG_INLINE void _set_selector(const OtherDerived& other, const internal::false_type&) { _set_noalias(other); } /** \internal Like _set() but additionally makes the assumption that no aliasing effect can happen (which * is the case when creating a new matrix) so one can enforce lazy evaluation. * * \sa operator=(const MatrixBase&), _set() */ template EIGEN_STRONG_INLINE Derived& _set_noalias(const DenseBase& other) { // I don't think we need this resize call since the lazyAssign will anyways resize // and lazyAssign will be called by the assign selector. //_resize_to_match(other); // the 'false' below means to enforce lazy evaluation. We don't use lazyAssign() because // it wouldn't allow to copy a row-vector into a column-vector. 
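// --- Illustrative sketch (editorial addition, not part of the Eigen header above). ---
// The setConstant/setZero/setOnes/setRandom overloads declared above take sizes and
// therefore resize and fill in a single call. Hypothetical standalone example:
#include <Eigen/Dense>
#include <iostream>

int main() {
    Eigen::MatrixXd m;
    m.setConstant(2, 3, 7.0);     // resize to 2x3, fill with 7
    Eigen::VectorXd v;
    v.setZero(5);                 // resize to length 5, fill with 0
    std::cout << m << "\n" << v.transpose() << "\n";
    return 0;
}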
return internal::assign_selector::run(this->derived(), other.derived()); } template EIGEN_STRONG_INLINE void _init2(Index nbRows, Index nbCols, typename internal::enable_if::type* = 0) { EIGEN_STATIC_ASSERT(bool(NumTraits::IsInteger) && bool(NumTraits::IsInteger), FLOATING_POINT_ARGUMENT_PASSED__INTEGER_WAS_EXPECTED) resize(nbRows,nbCols); } template EIGEN_STRONG_INLINE void _init2(const Scalar& val0, const Scalar& val1, typename internal::enable_if::type* = 0) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 2) m_storage.data()[0] = val0; m_storage.data()[1] = val1; } template friend struct internal::matrix_swap_impl; /** \internal generic implementation of swap for dense storage since for dynamic-sized matrices of same type it is enough to swap the * data pointers. */ template void _swap(DenseBase const & other) { enum { SwapPointers = internal::is_same::value && Base::SizeAtCompileTime==Dynamic }; internal::matrix_swap_impl::run(this->derived(), other.const_cast_derived()); } public: #ifndef EIGEN_PARSED_BY_DOXYGEN static EIGEN_STRONG_INLINE void _check_template_params() { EIGEN_STATIC_ASSERT((EIGEN_IMPLIES(MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1, (Options&RowMajor)==RowMajor) && EIGEN_IMPLIES(MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1, (Options&RowMajor)==0) && ((RowsAtCompileTime == Dynamic) || (RowsAtCompileTime >= 0)) && ((ColsAtCompileTime == Dynamic) || (ColsAtCompileTime >= 0)) && ((MaxRowsAtCompileTime == Dynamic) || (MaxRowsAtCompileTime >= 0)) && ((MaxColsAtCompileTime == Dynamic) || (MaxColsAtCompileTime >= 0)) && (MaxRowsAtCompileTime == RowsAtCompileTime || RowsAtCompileTime==Dynamic) && (MaxColsAtCompileTime == ColsAtCompileTime || ColsAtCompileTime==Dynamic) && (Options & (DontAlign|RowMajor)) == Options), INVALID_MATRIX_TEMPLATE_PARAMETERS) } #endif private: enum { ThisConstantIsPrivateInPlainObjectBase }; }; namespace internal { template struct conservative_resize_like_impl { typedef typename Derived::Index Index; static void run(DenseBase& _this, Index rows, Index cols) { if (_this.rows() == rows && _this.cols() == cols) return; EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived) if ( ( Derived::IsRowMajor && _this.cols() == cols) || // row-major and we change only the number of rows (!Derived::IsRowMajor && _this.rows() == rows) ) // column-major and we change only the number of columns { internal::check_rows_cols_for_overflow::run(rows, cols); _this.derived().m_storage.conservativeResize(rows*cols,rows,cols); } else { // The storage order does not allow us to use reallocation. typename Derived::PlainObject tmp(rows,cols); const Index common_rows = (std::min)(rows, _this.rows()); const Index common_cols = (std::min)(cols, _this.cols()); tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols); _this.derived().swap(tmp); } } static void run(DenseBase& _this, const DenseBase& other) { if (_this.rows() == other.rows() && _this.cols() == other.cols()) return; // Note: Here is space for improvement. Basically, for conservativeResize(Index,Index), // neither RowsAtCompileTime or ColsAtCompileTime must be Dynamic. If only one of the // dimensions is dynamic, one could use either conservativeResize(Index rows, NoChange_t) or // conservativeResize(NoChange_t, Index cols). For these methods new static asserts like // EIGEN_STATIC_ASSERT_DYNAMIC_ROWS and EIGEN_STATIC_ASSERT_DYNAMIC_COLS would be good. 
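// --- Illustrative sketch (editorial addition, not part of the Eigen header above). ---
// _init2() above disambiguates the two-argument constructor: for a dynamic-size
// matrix two integers are interpreted as the dimensions, while for a fixed-size
// 2-vector they are the coefficients. Hypothetical standalone example:
#include <Eigen/Dense>
#include <iostream>

int main() {
    Eigen::MatrixXd m(2, 3);       // sizes: 2 rows, 3 columns (coefficients uninitialized)
    Eigen::Vector2d v(2.0, 3.0);   // coefficients: [2, 3]
    std::cout << m.rows() << "x" << m.cols() << "  " << v.transpose() << "\n";
    return 0;
}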
EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived) EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(OtherDerived) if ( ( Derived::IsRowMajor && _this.cols() == other.cols()) || // row-major and we change only the number of rows (!Derived::IsRowMajor && _this.rows() == other.rows()) ) // column-major and we change only the number of columns { const Index new_rows = other.rows() - _this.rows(); const Index new_cols = other.cols() - _this.cols(); _this.derived().m_storage.conservativeResize(other.size(),other.rows(),other.cols()); if (new_rows>0) _this.bottomRightCorner(new_rows, other.cols()) = other.bottomRows(new_rows); else if (new_cols>0) _this.bottomRightCorner(other.rows(), new_cols) = other.rightCols(new_cols); } else { // The storage order does not allow us to use reallocation. typename Derived::PlainObject tmp(other); const Index common_rows = (std::min)(tmp.rows(), _this.rows()); const Index common_cols = (std::min)(tmp.cols(), _this.cols()); tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols); _this.derived().swap(tmp); } } }; // Here, the specialization for vectors inherits from the general matrix case // to allow calling .conservativeResize(rows,cols) on vectors. template struct conservative_resize_like_impl : conservative_resize_like_impl { using conservative_resize_like_impl::run; typedef typename Derived::Index Index; static void run(DenseBase& _this, Index size) { const Index new_rows = Derived::RowsAtCompileTime==1 ? 1 : size; const Index new_cols = Derived::RowsAtCompileTime==1 ? size : 1; _this.derived().m_storage.conservativeResize(size,new_rows,new_cols); } static void run(DenseBase& _this, const DenseBase& other) { if (_this.rows() == other.rows() && _this.cols() == other.cols()) return; const Index num_new_elements = other.size() - _this.size(); const Index new_rows = Derived::RowsAtCompileTime==1 ? 1 : other.rows(); const Index new_cols = Derived::RowsAtCompileTime==1 ? other.cols() : 1; _this.derived().m_storage.conservativeResize(other.size(),new_rows,new_cols); if (num_new_elements > 0) _this.tail(num_new_elements) = other.tail(num_new_elements); } }; template struct matrix_swap_impl { static inline void run(MatrixTypeA& a, MatrixTypeB& b) { a.base().swap(b); } }; template struct matrix_swap_impl { static inline void run(MatrixTypeA& a, MatrixTypeB& b) { static_cast(a).m_storage.swap(static_cast(b).m_storage); } }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_DENSESTORAGEBASE_H RcppEigen/inst/include/Eigen/src/Core/Transpositions.h0000644000176200001440000003577512253717461022475 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010-2011 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_TRANSPOSITIONS_H #define EIGEN_TRANSPOSITIONS_H namespace Eigen { /** \class Transpositions * \ingroup Core_Module * * \brief Represents a sequence of transpositions (row/column interchange) * * \param SizeAtCompileTime the number of transpositions, or Dynamic * \param MaxSizeAtCompileTime the maximum number of transpositions, or Dynamic. This optional parameter defaults to SizeAtCompileTime. Most of the time, you should not have to specify it. * * This class represents a permutation transformation as a sequence of \em n transpositions * \f$[T_{n-1} \ldots T_{i} \ldots T_{0}]\f$. 
It is internally stored as a vector of integers \c indices. * Each transposition \f$ T_{i} \f$ applied on the left of a matrix (\f$ T_{i} M\f$) interchanges * the rows \c i and \c indices[i] of the matrix \c M. * A transposition applied on the right (e.g., \f$ M T_{i}\f$) yields a column interchange. * * Compared to the class PermutationMatrix, such a sequence of transpositions is what is * computed during a decomposition with pivoting, and it is faster when applying the permutation in-place. * * To apply a sequence of transpositions to a matrix, simply use the operator * as in the following example: * \code * Transpositions tr; * MatrixXf mat; * mat = tr * mat; * \endcode * In this example, we detect that the matrix appears on both side, and so the transpositions * are applied in-place without any temporary or extra copy. * * \sa class PermutationMatrix */ namespace internal { template struct transposition_matrix_product_retval; } template class TranspositionsBase { typedef internal::traits Traits; public: typedef typename Traits::IndicesType IndicesType; typedef typename IndicesType::Scalar Index; Derived& derived() { return *static_cast(this); } const Derived& derived() const { return *static_cast(this); } /** Copies the \a other transpositions into \c *this */ template Derived& operator=(const TranspositionsBase& other) { indices() = other.indices(); return derived(); } #ifndef EIGEN_PARSED_BY_DOXYGEN /** This is a special case of the templated operator=. Its purpose is to * prevent a default operator= from hiding the templated operator=. */ Derived& operator=(const TranspositionsBase& other) { indices() = other.indices(); return derived(); } #endif /** \returns the number of transpositions */ inline Index size() const { return indices().size(); } /** Direct access to the underlying index vector */ inline const Index& coeff(Index i) const { return indices().coeff(i); } /** Direct access to the underlying index vector */ inline Index& coeffRef(Index i) { return indices().coeffRef(i); } /** Direct access to the underlying index vector */ inline const Index& operator()(Index i) const { return indices()(i); } /** Direct access to the underlying index vector */ inline Index& operator()(Index i) { return indices()(i); } /** Direct access to the underlying index vector */ inline const Index& operator[](Index i) const { return indices()(i); } /** Direct access to the underlying index vector */ inline Index& operator[](Index i) { return indices()(i); } /** const version of indices(). */ const IndicesType& indices() const { return derived().indices(); } /** \returns a reference to the stored array representing the transpositions. */ IndicesType& indices() { return derived().indices(); } /** Resizes to given size. */ inline void resize(int newSize) { indices().resize(newSize); } /** Sets \c *this to represents an identity transformation */ void setIdentity() { for(int i = 0; i < indices().size(); ++i) coeffRef(i) = i; } // FIXME: do we want such methods ? // might be usefull when the target matrix expression is complex, e.g.: // object.matrix().block(..,..,..,..) 
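// --- Illustrative sketch (editorial addition, not part of the Eigen header above). ---
// As the class documentation explains, multiplying by a Transpositions object applies
// the row (or column) interchanges, and the aliased form "mat = tr * mat" is carried
// out in place. Hypothetical standalone example:
#include <Eigen/Dense>
#include <iostream>

int main() {
    Eigen::Transpositions<Eigen::Dynamic> tr(3);
    tr.setIdentity();
    tr[1] = 2;                      // transposition T_1 swaps rows 1 and 2
    Eigen::MatrixXd mat(3, 2);
    mat << 1, 2,
           3, 4,
           5, 6;
    mat = tr * mat;                 // applied in place, no temporary copy
    std::cout << mat << "\n";
    return 0;
}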
= trans * object.matrix().block(..,..,..,..); /* template void applyForwardToRows(MatrixType& mat) const { for(Index k=0 ; k void applyBackwardToRows(MatrixType& mat) const { for(Index k=size()-1 ; k>=0 ; --k) if(m_indices(k)!=k) mat.row(k).swap(mat.row(m_indices(k))); } */ /** \returns the inverse transformation */ inline Transpose inverse() const { return Transpose(derived()); } /** \returns the tranpose transformation */ inline Transpose transpose() const { return Transpose(derived()); } protected: }; namespace internal { template struct traits > { typedef IndexType Index; typedef Matrix IndicesType; }; } template class Transpositions : public TranspositionsBase > { typedef internal::traits Traits; public: typedef TranspositionsBase Base; typedef typename Traits::IndicesType IndicesType; typedef typename IndicesType::Scalar Index; inline Transpositions() {} /** Copy constructor. */ template inline Transpositions(const TranspositionsBase& other) : m_indices(other.indices()) {} #ifndef EIGEN_PARSED_BY_DOXYGEN /** Standard copy constructor. Defined only to prevent a default copy constructor * from hiding the other templated constructor */ inline Transpositions(const Transpositions& other) : m_indices(other.indices()) {} #endif /** Generic constructor from expression of the transposition indices. */ template explicit inline Transpositions(const MatrixBase& a_indices) : m_indices(a_indices) {} /** Copies the \a other transpositions into \c *this */ template Transpositions& operator=(const TranspositionsBase& other) { return Base::operator=(other); } #ifndef EIGEN_PARSED_BY_DOXYGEN /** This is a special case of the templated operator=. Its purpose is to * prevent a default operator= from hiding the templated operator=. */ Transpositions& operator=(const Transpositions& other) { m_indices = other.m_indices; return *this; } #endif /** Constructs an uninitialized permutation matrix of given size. */ inline Transpositions(Index size) : m_indices(size) {} /** const version of indices(). */ const IndicesType& indices() const { return m_indices; } /** \returns a reference to the stored array representing the transpositions. */ IndicesType& indices() { return m_indices; } protected: IndicesType m_indices; }; namespace internal { template struct traits,_PacketAccess> > { typedef IndexType Index; typedef Map, _PacketAccess> IndicesType; }; } template class Map,PacketAccess> : public TranspositionsBase,PacketAccess> > { typedef internal::traits Traits; public: typedef TranspositionsBase Base; typedef typename Traits::IndicesType IndicesType; typedef typename IndicesType::Scalar Index; inline Map(const Index* indicesPtr) : m_indices(indicesPtr) {} inline Map(const Index* indicesPtr, Index size) : m_indices(indicesPtr,size) {} /** Copies the \a other transpositions into \c *this */ template Map& operator=(const TranspositionsBase& other) { return Base::operator=(other); } #ifndef EIGEN_PARSED_BY_DOXYGEN /** This is a special case of the templated operator=. Its purpose is to * prevent a default operator= from hiding the templated operator=. */ Map& operator=(const Map& other) { m_indices = other.m_indices; return *this; } #endif /** const version of indices(). */ const IndicesType& indices() const { return m_indices; } /** \returns a reference to the stored array representing the transpositions. 
*/ IndicesType& indices() { return m_indices; } protected: IndicesType m_indices; }; namespace internal { template struct traits > { typedef typename _IndicesType::Scalar Index; typedef _IndicesType IndicesType; }; } template class TranspositionsWrapper : public TranspositionsBase > { typedef internal::traits Traits; public: typedef TranspositionsBase Base; typedef typename Traits::IndicesType IndicesType; typedef typename IndicesType::Scalar Index; inline TranspositionsWrapper(IndicesType& a_indices) : m_indices(a_indices) {} /** Copies the \a other transpositions into \c *this */ template TranspositionsWrapper& operator=(const TranspositionsBase& other) { return Base::operator=(other); } #ifndef EIGEN_PARSED_BY_DOXYGEN /** This is a special case of the templated operator=. Its purpose is to * prevent a default operator= from hiding the templated operator=. */ TranspositionsWrapper& operator=(const TranspositionsWrapper& other) { m_indices = other.m_indices; return *this; } #endif /** const version of indices(). */ const IndicesType& indices() const { return m_indices; } /** \returns a reference to the stored array representing the transpositions. */ IndicesType& indices() { return m_indices; } protected: const typename IndicesType::Nested m_indices; }; /** \returns the \a matrix with the \a transpositions applied to the columns. */ template inline const internal::transposition_matrix_product_retval operator*(const MatrixBase& matrix, const TranspositionsBase &transpositions) { return internal::transposition_matrix_product_retval (transpositions.derived(), matrix.derived()); } /** \returns the \a matrix with the \a transpositions applied to the rows. */ template inline const internal::transposition_matrix_product_retval operator*(const TranspositionsBase &transpositions, const MatrixBase& matrix) { return internal::transposition_matrix_product_retval (transpositions.derived(), matrix.derived()); } namespace internal { template struct traits > { typedef typename MatrixType::PlainObject ReturnType; }; template struct transposition_matrix_product_retval : public ReturnByValue > { typedef typename remove_all::type MatrixTypeNestedCleaned; typedef typename TranspositionType::Index Index; transposition_matrix_product_retval(const TranspositionType& tr, const MatrixType& matrix) : m_transpositions(tr), m_matrix(matrix) {} inline int rows() const { return m_matrix.rows(); } inline int cols() const { return m_matrix.cols(); } template inline void evalTo(Dest& dst) const { const int size = m_transpositions.size(); Index j = 0; if(!(is_same::value && extract_data(dst) == extract_data(m_matrix))) dst = m_matrix; for(int k=(Transposed?size-1:0) ; Transposed?k>=0:k class Transpose > { typedef TranspositionsDerived TranspositionType; typedef typename TranspositionType::IndicesType IndicesType; public: Transpose(const TranspositionType& t) : m_transpositions(t) {} inline int size() const { return m_transpositions.size(); } /** \returns the \a matrix with the inverse transpositions applied to the columns. */ template friend inline const internal::transposition_matrix_product_retval operator*(const MatrixBase& matrix, const Transpose& trt) { return internal::transposition_matrix_product_retval(trt.m_transpositions, matrix.derived()); } /** \returns the \a matrix with the inverse transpositions applied to the rows. 
*/ template inline const internal::transposition_matrix_product_retval operator*(const MatrixBase& matrix) const { return internal::transposition_matrix_product_retval(m_transpositions, matrix.derived()); } protected: const TranspositionType& m_transpositions; }; } // end namespace Eigen #endif // EIGEN_TRANSPOSITIONS_H RcppEigen/inst/include/Eigen/src/Core/Functors.h0000644000176200001440000011372612550462273021230 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_FUNCTORS_H #define EIGEN_FUNCTORS_H namespace Eigen { namespace internal { // associative functors: /** \internal * \brief Template functor to compute the sum of two scalars * * \sa class CwiseBinaryOp, MatrixBase::operator+, class VectorwiseOp, MatrixBase::sum() */ template struct scalar_sum_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_sum_op) EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return a + b; } template EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const { return internal::padd(a,b); } template EIGEN_STRONG_INLINE const Scalar predux(const Packet& a) const { return internal::predux(a); } }; template struct functor_traits > { enum { Cost = NumTraits::AddCost, PacketAccess = packet_traits::HasAdd }; }; /** \internal * \brief Template functor to compute the product of two scalars * * \sa class CwiseBinaryOp, Cwise::operator*(), class VectorwiseOp, MatrixBase::redux() */ template struct scalar_product_op { enum { // TODO vectorize mixed product Vectorizable = is_same::value && packet_traits::HasMul && packet_traits::HasMul }; typedef typename scalar_product_traits::ReturnType result_type; EIGEN_EMPTY_STRUCT_CTOR(scalar_product_op) EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a * b; } template EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const { return internal::pmul(a,b); } template EIGEN_STRONG_INLINE const result_type predux(const Packet& a) const { return internal::predux_mul(a); } }; template struct functor_traits > { enum { Cost = (NumTraits::MulCost + NumTraits::MulCost)/2, // rough estimate! 
PacketAccess = scalar_product_op::Vectorizable }; }; /** \internal * \brief Template functor to compute the conjugate product of two scalars * * This is a short cut for conj(x) * y which is needed for optimization purpose; in Eigen2 support mode, this becomes x * conj(y) */ template struct scalar_conj_product_op { enum { Conj = NumTraits::IsComplex }; typedef typename scalar_product_traits::ReturnType result_type; EIGEN_EMPTY_STRUCT_CTOR(scalar_conj_product_op) EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return conj_helper().pmul(a,b); } template EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const { return conj_helper().pmul(a,b); } }; template struct functor_traits > { enum { Cost = NumTraits::MulCost, PacketAccess = internal::is_same::value && packet_traits::HasMul }; }; /** \internal * \brief Template functor to compute the min of two scalars * * \sa class CwiseBinaryOp, MatrixBase::cwiseMin, class VectorwiseOp, MatrixBase::minCoeff() */ template struct scalar_min_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_min_op) EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { using std::min; return (min)(a, b); } template EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const { return internal::pmin(a,b); } template EIGEN_STRONG_INLINE const Scalar predux(const Packet& a) const { return internal::predux_min(a); } }; template struct functor_traits > { enum { Cost = NumTraits::AddCost, PacketAccess = packet_traits::HasMin }; }; /** \internal * \brief Template functor to compute the max of two scalars * * \sa class CwiseBinaryOp, MatrixBase::cwiseMax, class VectorwiseOp, MatrixBase::maxCoeff() */ template struct scalar_max_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_max_op) EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { using std::max; return (max)(a, b); } template EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const { return internal::pmax(a,b); } template EIGEN_STRONG_INLINE const Scalar predux(const Packet& a) const { return internal::predux_max(a); } }; template struct functor_traits > { enum { Cost = NumTraits::AddCost, PacketAccess = packet_traits::HasMax }; }; /** \internal * \brief Template functor to compute the hypot of two scalars * * \sa MatrixBase::stableNorm(), class Redux */ template struct scalar_hypot_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_hypot_op) // typedef typename NumTraits::Real result_type; EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& _x, const Scalar& _y) const { using std::max; using std::min; using std::sqrt; Scalar p = (max)(_x, _y); Scalar q = (min)(_x, _y); Scalar qp = q/p; return p * sqrt(Scalar(1) + qp*qp); } }; template struct functor_traits > { enum { Cost = 5 * NumTraits::MulCost, PacketAccess=0 }; }; /** \internal * \brief Template functor to compute the pow of two scalars */ template struct scalar_binary_pow_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_binary_pow_op) inline Scalar operator() (const Scalar& a, const OtherScalar& b) const { return numext::pow(a, b); } }; template struct functor_traits > { enum { Cost = 5 * NumTraits::MulCost, PacketAccess = false }; }; // other binary functors: /** \internal * \brief Template functor to compute the difference of two scalars * * \sa class CwiseBinaryOp, MatrixBase::operator- */ template struct scalar_difference_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_difference_op) EIGEN_STRONG_INLINE const Scalar operator() (const 
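// --- Illustrative sketch (editorial addition, not part of the Eigen header above). ---
// The binary functors above are what the coefficient-wise API dispatches to:
// operator+ uses scalar_sum_op, cwiseProduct() scalar_product_op, and
// cwiseMin()/cwiseMax() the min/max functors. Hypothetical standalone example:
#include <Eigen/Dense>
#include <iostream>

int main() {
    Eigen::VectorXd a(4), b(4);
    a << 1, 4, 2, 8;
    b << 3, 3, 3, 3;
    std::cout << (a + b).transpose()           << "\n"   // scalar_sum_op
              << a.cwiseProduct(b).transpose() << "\n"   // scalar_product_op
              << a.cwiseMin(b).transpose()     << "\n"   // scalar_min_op
              << a.cwiseMax(b).transpose()     << "\n";  // scalar_max_op
    return 0;
}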
Scalar& a, const Scalar& b) const { return a - b; } template EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const { return internal::psub(a,b); } }; template struct functor_traits > { enum { Cost = NumTraits::AddCost, PacketAccess = packet_traits::HasSub }; }; /** \internal * \brief Template functor to compute the quotient of two scalars * * \sa class CwiseBinaryOp, Cwise::operator/() */ template struct scalar_quotient_op { enum { // TODO vectorize mixed product Vectorizable = is_same::value && packet_traits::HasDiv && packet_traits::HasDiv }; typedef typename scalar_product_traits::ReturnType result_type; EIGEN_EMPTY_STRUCT_CTOR(scalar_quotient_op) EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a / b; } template EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const { return internal::pdiv(a,b); } }; template struct functor_traits > { enum { Cost = (NumTraits::MulCost + NumTraits::MulCost), // rough estimate! PacketAccess = scalar_quotient_op::Vectorizable }; }; /** \internal * \brief Template functor to compute the and of two booleans * * \sa class CwiseBinaryOp, ArrayBase::operator&& */ struct scalar_boolean_and_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_boolean_and_op) EIGEN_STRONG_INLINE bool operator() (const bool& a, const bool& b) const { return a && b; } }; template<> struct functor_traits { enum { Cost = NumTraits::AddCost, PacketAccess = false }; }; /** \internal * \brief Template functor to compute the or of two booleans * * \sa class CwiseBinaryOp, ArrayBase::operator|| */ struct scalar_boolean_or_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_boolean_or_op) EIGEN_STRONG_INLINE bool operator() (const bool& a, const bool& b) const { return a || b; } }; template<> struct functor_traits { enum { Cost = NumTraits::AddCost, PacketAccess = false }; }; /** \internal * \brief Template functors for comparison of two scalars * \todo Implement packet-comparisons */ template struct scalar_cmp_op; template struct functor_traits > { enum { Cost = NumTraits::AddCost, PacketAccess = false }; }; template struct result_of(Scalar,Scalar)> { typedef bool type; }; template struct scalar_cmp_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op) EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a==b;} }; template struct scalar_cmp_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op) EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a struct scalar_cmp_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op) EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a<=b;} }; template struct scalar_cmp_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op) EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return !(a<=b || b<=a);} }; template struct scalar_cmp_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op) EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a!=b;} }; // unary functors: /** \internal * \brief Template functor to compute the opposite of a scalar * * \sa class CwiseUnaryOp, MatrixBase::operator- */ template struct scalar_opposite_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_opposite_op) EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return -a; } template EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const { return internal::pnegate(a); } }; template struct functor_traits > { enum { Cost = NumTraits::AddCost, PacketAccess = packet_traits::HasNegate }; }; /** 
\internal * \brief Template functor to compute the absolute value of a scalar * * \sa class CwiseUnaryOp, Cwise::abs */ template struct scalar_abs_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_abs_op) typedef typename NumTraits::Real result_type; EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { using std::abs; return abs(a); } template EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const { return internal::pabs(a); } }; template struct functor_traits > { enum { Cost = NumTraits::AddCost, PacketAccess = packet_traits::HasAbs }; }; /** \internal * \brief Template functor to compute the squared absolute value of a scalar * * \sa class CwiseUnaryOp, Cwise::abs2 */ template struct scalar_abs2_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_abs2_op) typedef typename NumTraits::Real result_type; EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return numext::abs2(a); } template EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const { return internal::pmul(a,a); } }; template struct functor_traits > { enum { Cost = NumTraits::MulCost, PacketAccess = packet_traits::HasAbs2 }; }; /** \internal * \brief Template functor to compute the conjugate of a complex value * * \sa class CwiseUnaryOp, MatrixBase::conjugate() */ template struct scalar_conjugate_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_conjugate_op) EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { using numext::conj; return conj(a); } template EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const { return internal::pconj(a); } }; template struct functor_traits > { enum { Cost = NumTraits::IsComplex ? NumTraits::AddCost : 0, PacketAccess = packet_traits::HasConj }; }; /** \internal * \brief Template functor to cast a scalar to another type * * \sa class CwiseUnaryOp, MatrixBase::cast() */ template struct scalar_cast_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op) typedef NewType result_type; EIGEN_STRONG_INLINE const NewType operator() (const Scalar& a) const { return cast(a); } }; template struct functor_traits > { enum { Cost = is_same::value ? 
0 : NumTraits::AddCost, PacketAccess = false }; }; /** \internal * \brief Template functor to extract the real part of a complex * * \sa class CwiseUnaryOp, MatrixBase::real() */ template struct scalar_real_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_real_op) typedef typename NumTraits::Real result_type; EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return numext::real(a); } }; template struct functor_traits > { enum { Cost = 0, PacketAccess = false }; }; /** \internal * \brief Template functor to extract the imaginary part of a complex * * \sa class CwiseUnaryOp, MatrixBase::imag() */ template struct scalar_imag_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_imag_op) typedef typename NumTraits::Real result_type; EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return numext::imag(a); } }; template struct functor_traits > { enum { Cost = 0, PacketAccess = false }; }; /** \internal * \brief Template functor to extract the real part of a complex as a reference * * \sa class CwiseUnaryOp, MatrixBase::real() */ template struct scalar_real_ref_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_real_ref_op) typedef typename NumTraits::Real result_type; EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return numext::real_ref(*const_cast(&a)); } }; template struct functor_traits > { enum { Cost = 0, PacketAccess = false }; }; /** \internal * \brief Template functor to extract the imaginary part of a complex as a reference * * \sa class CwiseUnaryOp, MatrixBase::imag() */ template struct scalar_imag_ref_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_imag_ref_op) typedef typename NumTraits::Real result_type; EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return numext::imag_ref(*const_cast(&a)); } }; template struct functor_traits > { enum { Cost = 0, PacketAccess = false }; }; /** \internal * * \brief Template functor to compute the exponential of a scalar * * \sa class CwiseUnaryOp, Cwise::exp() */ template struct scalar_exp_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_exp_op) inline const Scalar operator() (const Scalar& a) const { using std::exp; return exp(a); } typedef typename packet_traits::type Packet; inline Packet packetOp(const Packet& a) const { return internal::pexp(a); } }; template struct functor_traits > { enum { Cost = 5 * NumTraits::MulCost, PacketAccess = packet_traits::HasExp }; }; /** \internal * * \brief Template functor to compute the logarithm of a scalar * * \sa class CwiseUnaryOp, Cwise::log() */ template struct scalar_log_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_log_op) inline const Scalar operator() (const Scalar& a) const { using std::log; return log(a); } typedef typename packet_traits::type Packet; inline Packet packetOp(const Packet& a) const { return internal::plog(a); } }; template struct functor_traits > { enum { Cost = 5 * NumTraits::MulCost, PacketAccess = packet_traits::HasLog }; }; /** \internal * \brief Template functor to multiply a scalar by a fixed other one * * \sa class CwiseUnaryOp, MatrixBase::operator*, MatrixBase::operator/ */ /* NOTE why doing the pset1() in packetOp *is* an optimization ? * indeed it seems better to declare m_other as a Packet and do the pset1() once * in the constructor. 
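// --- Illustrative sketch (editorial addition, not part of the Eigen header above). ---
// The unary functors above surface through the array API and through cast():
// scalar_exp_op backs exp(), scalar_log_op log(), and scalar_cast_op the cast<>()
// conversion. Hypothetical standalone example:
#include <Eigen/Dense>
#include <iostream>

int main() {
    Eigen::ArrayXd a(3);
    a << 1.0, 2.0, 3.0;
    std::cout << a.exp().transpose() << "\n"    // scalar_exp_op
              << a.log().transpose() << "\n";   // scalar_log_op
    Eigen::ArrayXi i = a.cast<int>();           // scalar_cast_op
    std::cout << i.transpose() << "\n";
    return 0;
}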
However, in practice: * - GCC does not like m_other as a Packet and generate a load every time it needs it * - on the other hand GCC is able to moves the pset1() outside the loop :) * - simpler code ;) * (ICC and gcc 4.4 seems to perform well in both cases, the issue is visible with y = a*x + b*y) */ template struct scalar_multiple_op { typedef typename packet_traits::type Packet; // FIXME default copy constructors seems bugged with std::complex<> EIGEN_STRONG_INLINE scalar_multiple_op(const scalar_multiple_op& other) : m_other(other.m_other) { } EIGEN_STRONG_INLINE scalar_multiple_op(const Scalar& other) : m_other(other) { } EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a) const { return a * m_other; } EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const { return internal::pmul(a, pset1(m_other)); } typename add_const_on_value_type::Nested>::type m_other; }; template struct functor_traits > { enum { Cost = NumTraits::MulCost, PacketAccess = packet_traits::HasMul }; }; template struct scalar_multiple2_op { typedef typename scalar_product_traits::ReturnType result_type; EIGEN_STRONG_INLINE scalar_multiple2_op(const scalar_multiple2_op& other) : m_other(other.m_other) { } EIGEN_STRONG_INLINE scalar_multiple2_op(const Scalar2& other) : m_other(other) { } EIGEN_STRONG_INLINE result_type operator() (const Scalar1& a) const { return a * m_other; } typename add_const_on_value_type::Nested>::type m_other; }; template struct functor_traits > { enum { Cost = NumTraits::MulCost, PacketAccess = false }; }; /** \internal * \brief Template functor to divide a scalar by a fixed other one * * This functor is used to implement the quotient of a matrix by * a scalar where the scalar type is not necessarily a floating point type. * * \sa class CwiseUnaryOp, MatrixBase::operator/ */ template struct scalar_quotient1_op { typedef typename packet_traits::type Packet; // FIXME default copy constructors seems bugged with std::complex<> EIGEN_STRONG_INLINE scalar_quotient1_op(const scalar_quotient1_op& other) : m_other(other.m_other) { } EIGEN_STRONG_INLINE scalar_quotient1_op(const Scalar& other) : m_other(other) {} EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a) const { return a / m_other; } EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const { return internal::pdiv(a, pset1(m_other)); } typename add_const_on_value_type::Nested>::type m_other; }; template struct functor_traits > { enum { Cost = 2 * NumTraits::MulCost, PacketAccess = packet_traits::HasDiv }; }; // nullary functors template struct scalar_constant_op { typedef typename packet_traits::type Packet; EIGEN_STRONG_INLINE scalar_constant_op(const scalar_constant_op& other) : m_other(other.m_other) { } EIGEN_STRONG_INLINE scalar_constant_op(const Scalar& other) : m_other(other) { } template EIGEN_STRONG_INLINE const Scalar operator() (Index, Index = 0) const { return m_other; } template EIGEN_STRONG_INLINE const Packet packetOp(Index, Index = 0) const { return internal::pset1(m_other); } const Scalar m_other; }; template struct functor_traits > // FIXME replace this packet test by a safe one { enum { Cost = 1, PacketAccess = packet_traits::Vectorizable, IsRepeatable = true }; }; template struct scalar_identity_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_identity_op) template EIGEN_STRONG_INLINE const Scalar operator() (Index row, Index col) const { return row==col ? 
Scalar(1) : Scalar(0); } }; template struct functor_traits > { enum { Cost = NumTraits::AddCost, PacketAccess = false, IsRepeatable = true }; }; template struct linspaced_op_impl; // linear access for packet ops: // 1) initialization // base = [low, ..., low] + ([step, ..., step] * [-size, ..., 0]) // 2) each step (where size is 1 for coeff access or PacketSize for packet access) // base += [size*step, ..., size*step] // // TODO: Perhaps it's better to initialize lazily (so not in the constructor but in packetOp) // in order to avoid the padd() in operator() ? template struct linspaced_op_impl { typedef typename packet_traits::type Packet; linspaced_op_impl(const Scalar& low, const Scalar& step) : m_low(low), m_step(step), m_packetStep(pset1(packet_traits::size*step)), m_base(padd(pset1(low), pmul(pset1(step),plset(-packet_traits::size)))) {} template EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { m_base = padd(m_base, pset1(m_step)); return m_low+Scalar(i)*m_step; } template EIGEN_STRONG_INLINE const Packet packetOp(Index) const { return m_base = padd(m_base,m_packetStep); } const Scalar m_low; const Scalar m_step; const Packet m_packetStep; mutable Packet m_base; }; // random access for packet ops: // 1) each step // [low, ..., low] + ( [step, ..., step] * ( [i, ..., i] + [0, ..., size] ) ) template struct linspaced_op_impl { typedef typename packet_traits::type Packet; linspaced_op_impl(const Scalar& low, const Scalar& step) : m_low(low), m_step(step), m_lowPacket(pset1(m_low)), m_stepPacket(pset1(m_step)), m_interPacket(plset(0)) {} template EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return m_low+i*m_step; } template EIGEN_STRONG_INLINE const Packet packetOp(Index i) const { return internal::padd(m_lowPacket, pmul(m_stepPacket, padd(pset1(Scalar(i)),m_interPacket))); } const Scalar m_low; const Scalar m_step; const Packet m_lowPacket; const Packet m_stepPacket; const Packet m_interPacket; }; // ----- Linspace functor ---------------------------------------------------------------- // Forward declaration (we default to random access which does not really give // us a speed gain when using packet access but it allows to use the functor in // nested expressions). template struct linspaced_op; template struct functor_traits< linspaced_op > { enum { Cost = 1, PacketAccess = packet_traits::HasSetLinear, IsRepeatable = true }; }; template struct linspaced_op { typedef typename packet_traits::type Packet; linspaced_op(const Scalar& low, const Scalar& high, DenseIndex num_steps) : impl((num_steps==1 ? high : low), (num_steps==1 ? Scalar() : (high-low)/Scalar(num_steps-1))) {} template EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return impl(i); } // We need this function when assigning e.g. a RowVectorXd to a MatrixXd since // there row==0 and col is used for the actual iteration. template EIGEN_STRONG_INLINE const Scalar operator() (Index row, Index col) const { eigen_assert(col==0 || row==0); return impl(col + row); } template EIGEN_STRONG_INLINE const Packet packetOp(Index i) const { return impl.packetOp(i); } // We need this function when assigning e.g. a RowVectorXd to a MatrixXd since // there row==0 and col is used for the actual iteration. template EIGEN_STRONG_INLINE const Packet packetOp(Index row, Index col) const { eigen_assert(col==0 || row==0); return impl.packetOp(col + row); } // This proxy object handles the actual required temporaries, the different // implementations (random vs. 
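// --- Illustrative sketch (editorial addition, not part of the Eigen header above). ---
// linspaced_op above is the nullary functor behind the LinSpaced() factory: for
// num_steps > 1 it maps index i to low + i*(high-low)/(num_steps-1).
// Hypothetical standalone example:
#include <Eigen/Dense>
#include <iostream>

int main() {
    Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(5, 0.0, 1.0);  // 0 0.25 0.5 0.75 1
    std::cout << v.transpose() << "\n";
    return 0;
}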
sequential access) as well as the // correct piping to size 2/4 packet operations. const linspaced_op_impl impl; }; // all functors allow linear access, except scalar_identity_op. So we fix here a quick meta // to indicate whether a functor allows linear access, just always answering 'yes' except for // scalar_identity_op. // FIXME move this to functor_traits adding a functor_default template struct functor_has_linear_access { enum { ret = 1 }; }; template struct functor_has_linear_access > { enum { ret = 0 }; }; // In Eigen, any binary op (Product, CwiseBinaryOp) require the Lhs and Rhs to have the same scalar type, except for multiplication // where the mixing of different types is handled by scalar_product_traits // In particular, real * complex is allowed. // FIXME move this to functor_traits adding a functor_default template struct functor_is_product_like { enum { ret = 0 }; }; template struct functor_is_product_like > { enum { ret = 1 }; }; template struct functor_is_product_like > { enum { ret = 1 }; }; template struct functor_is_product_like > { enum { ret = 1 }; }; /** \internal * \brief Template functor to add a scalar to a fixed other one * \sa class CwiseUnaryOp, Array::operator+ */ /* If you wonder why doing the pset1() in packetOp() is an optimization check scalar_multiple_op */ template struct scalar_add_op { typedef typename packet_traits::type Packet; // FIXME default copy constructors seems bugged with std::complex<> inline scalar_add_op(const scalar_add_op& other) : m_other(other.m_other) { } inline scalar_add_op(const Scalar& other) : m_other(other) { } inline Scalar operator() (const Scalar& a) const { return a + m_other; } inline const Packet packetOp(const Packet& a) const { return internal::padd(a, pset1(m_other)); } const Scalar m_other; }; template struct functor_traits > { enum { Cost = NumTraits::AddCost, PacketAccess = packet_traits::HasAdd }; }; /** \internal * \brief Template functor to compute the square root of a scalar * \sa class CwiseUnaryOp, Cwise::sqrt() */ template struct scalar_sqrt_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_sqrt_op) inline const Scalar operator() (const Scalar& a) const { using std::sqrt; return sqrt(a); } typedef typename packet_traits::type Packet; inline Packet packetOp(const Packet& a) const { return internal::psqrt(a); } }; template struct functor_traits > { enum { Cost = 5 * NumTraits::MulCost, PacketAccess = packet_traits::HasSqrt }; }; /** \internal * \brief Template functor to compute the cosine of a scalar * \sa class CwiseUnaryOp, ArrayBase::cos() */ template struct scalar_cos_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_cos_op) inline Scalar operator() (const Scalar& a) const { using std::cos; return cos(a); } typedef typename packet_traits::type Packet; inline Packet packetOp(const Packet& a) const { return internal::pcos(a); } }; template struct functor_traits > { enum { Cost = 5 * NumTraits::MulCost, PacketAccess = packet_traits::HasCos }; }; /** \internal * \brief Template functor to compute the sine of a scalar * \sa class CwiseUnaryOp, ArrayBase::sin() */ template struct scalar_sin_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_sin_op) inline const Scalar operator() (const Scalar& a) const { using std::sin; return sin(a); } typedef typename packet_traits::type Packet; inline Packet packetOp(const Packet& a) const { return internal::psin(a); } }; template struct functor_traits > { enum { Cost = 5 * NumTraits::MulCost, PacketAccess = packet_traits::HasSin }; }; /** \internal * \brief Template functor to compute the tan of a scalar * \sa class 
CwiseUnaryOp, ArrayBase::tan() */ template struct scalar_tan_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_tan_op) inline const Scalar operator() (const Scalar& a) const { using std::tan; return tan(a); } typedef typename packet_traits::type Packet; inline Packet packetOp(const Packet& a) const { return internal::ptan(a); } }; template struct functor_traits > { enum { Cost = 5 * NumTraits::MulCost, PacketAccess = packet_traits::HasTan }; }; /** \internal * \brief Template functor to compute the arc cosine of a scalar * \sa class CwiseUnaryOp, ArrayBase::acos() */ template struct scalar_acos_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_acos_op) inline const Scalar operator() (const Scalar& a) const { using std::acos; return acos(a); } typedef typename packet_traits::type Packet; inline Packet packetOp(const Packet& a) const { return internal::pacos(a); } }; template struct functor_traits > { enum { Cost = 5 * NumTraits::MulCost, PacketAccess = packet_traits::HasACos }; }; /** \internal * \brief Template functor to compute the arc sine of a scalar * \sa class CwiseUnaryOp, ArrayBase::asin() */ template struct scalar_asin_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_asin_op) inline const Scalar operator() (const Scalar& a) const { using std::asin; return asin(a); } typedef typename packet_traits::type Packet; inline Packet packetOp(const Packet& a) const { return internal::pasin(a); } }; template struct functor_traits > { enum { Cost = 5 * NumTraits::MulCost, PacketAccess = packet_traits::HasASin }; }; /** \internal * \brief Template functor to raise a scalar to a power * \sa class CwiseUnaryOp, Cwise::pow */ template struct scalar_pow_op { // FIXME default copy constructors seems bugged with std::complex<> inline scalar_pow_op(const scalar_pow_op& other) : m_exponent(other.m_exponent) { } inline scalar_pow_op(const Scalar& exponent) : m_exponent(exponent) {} inline Scalar operator() (const Scalar& a) const { return numext::pow(a, m_exponent); } const Scalar m_exponent; }; template struct functor_traits > { enum { Cost = 5 * NumTraits::MulCost, PacketAccess = false }; }; /** \internal * \brief Template functor to compute the quotient between a scalar and array entries. 
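*
* A minimal usage sketch of what this functor computes, independent of the expression-template
* plumbing; the exact template-argument spelling below is an assumption for illustration:
* \code
* Eigen::internal::scalar_inverse_mult_op<double> op(2.0); // fixes m_other = 2.0
* double r = op(4.0);                                      // r == 0.5, i.e. m_other / a
* \endcode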
* \sa class CwiseUnaryOp, Cwise::inverse() */ template struct scalar_inverse_mult_op { scalar_inverse_mult_op(const Scalar& other) : m_other(other) {} inline Scalar operator() (const Scalar& a) const { return m_other / a; } template inline const Packet packetOp(const Packet& a) const { return internal::pdiv(pset1(m_other),a); } Scalar m_other; }; /** \internal * \brief Template functor to compute the inverse of a scalar * \sa class CwiseUnaryOp, Cwise::inverse() */ template struct scalar_inverse_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_inverse_op) inline Scalar operator() (const Scalar& a) const { return Scalar(1)/a; } template inline const Packet packetOp(const Packet& a) const { return internal::pdiv(pset1(Scalar(1)),a); } }; template struct functor_traits > { enum { Cost = NumTraits::MulCost, PacketAccess = packet_traits::HasDiv }; }; /** \internal * \brief Template functor to compute the square of a scalar * \sa class CwiseUnaryOp, Cwise::square() */ template struct scalar_square_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_square_op) inline Scalar operator() (const Scalar& a) const { return a*a; } template inline const Packet packetOp(const Packet& a) const { return internal::pmul(a,a); } }; template struct functor_traits > { enum { Cost = NumTraits::MulCost, PacketAccess = packet_traits::HasMul }; }; /** \internal * \brief Template functor to compute the cube of a scalar * \sa class CwiseUnaryOp, Cwise::cube() */ template struct scalar_cube_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_cube_op) inline Scalar operator() (const Scalar& a) const { return a*a*a; } template inline const Packet packetOp(const Packet& a) const { return internal::pmul(a,pmul(a,a)); } }; template struct functor_traits > { enum { Cost = 2*NumTraits::MulCost, PacketAccess = packet_traits::HasMul }; }; // default functor traits for STL functors: template struct functor_traits > { enum { Cost = NumTraits::MulCost, PacketAccess = false }; }; template struct functor_traits > { enum { Cost = NumTraits::MulCost, PacketAccess = false }; }; template struct functor_traits > { enum { Cost = NumTraits::AddCost, PacketAccess = false }; }; template struct functor_traits > { enum { Cost = NumTraits::AddCost, PacketAccess = false }; }; template struct functor_traits > { enum { Cost = NumTraits::AddCost, PacketAccess = false }; }; template struct functor_traits > { enum { Cost = 1, PacketAccess = false }; }; template struct functor_traits > { enum { Cost = 1, PacketAccess = false }; }; template struct functor_traits > { enum { Cost = 1, PacketAccess = false }; }; template struct functor_traits > { enum { Cost = 1, PacketAccess = false }; }; template struct functor_traits > { enum { Cost = 1, PacketAccess = false }; }; template struct functor_traits > { enum { Cost = 1, PacketAccess = false }; }; template struct functor_traits > { enum { Cost = 1, PacketAccess = false }; }; template struct functor_traits > { enum { Cost = 1, PacketAccess = false }; }; template struct functor_traits > { enum { Cost = 1, PacketAccess = false }; }; template struct functor_traits > { enum { Cost = functor_traits::Cost, PacketAccess = false }; }; template struct functor_traits > { enum { Cost = functor_traits::Cost, PacketAccess = false }; }; template struct functor_traits > { enum { Cost = 1 + functor_traits::Cost, PacketAccess = false }; }; template struct functor_traits > { enum { Cost = 1 + functor_traits::Cost, PacketAccess = false }; }; #ifdef EIGEN_STDEXT_SUPPORT template struct functor_traits > { enum { Cost = 0, PacketAccess = false }; }; template struct 
functor_traits > { enum { Cost = 0, PacketAccess = false }; }; template struct functor_traits > > { enum { Cost = 0, PacketAccess = false }; }; template struct functor_traits > > { enum { Cost = 0, PacketAccess = false }; }; template struct functor_traits > { enum { Cost = functor_traits::Cost + functor_traits::Cost, PacketAccess = false }; }; template struct functor_traits > { enum { Cost = functor_traits::Cost + functor_traits::Cost + functor_traits::Cost, PacketAccess = false }; }; #endif // EIGEN_STDEXT_SUPPORT // allow to add new functors and specializations of functor_traits from outside Eigen. // this macro is really needed because functor_traits must be specialized after it is declared but before it is used... #ifdef EIGEN_FUNCTORS_PLUGIN #include EIGEN_FUNCTORS_PLUGIN #endif } // end namespace internal } // end namespace Eigen #endif // EIGEN_FUNCTORS_H RcppEigen/inst/include/Eigen/src/Core/Transpose.h0000644000176200001440000003514012305236565021375 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2008 Benoit Jacob // Copyright (C) 2009-2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_TRANSPOSE_H #define EIGEN_TRANSPOSE_H namespace Eigen { /** \class Transpose * \ingroup Core_Module * * \brief Expression of the transpose of a matrix * * \param MatrixType the type of the object of which we are taking the transpose * * This class represents an expression of the transpose of a matrix. * It is the return type of MatrixBase::transpose() and MatrixBase::adjoint() * and most of the time this is the only way it is used. * * \sa MatrixBase::transpose(), MatrixBase::adjoint() */ namespace internal { template struct traits > : traits { typedef typename MatrixType::Scalar Scalar; typedef typename nested::type MatrixTypeNested; typedef typename remove_reference::type MatrixTypeNestedPlain; typedef typename traits::StorageKind StorageKind; typedef typename traits::XprKind XprKind; enum { RowsAtCompileTime = MatrixType::ColsAtCompileTime, ColsAtCompileTime = MatrixType::RowsAtCompileTime, MaxRowsAtCompileTime = MatrixType::MaxColsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxRowsAtCompileTime, FlagsLvalueBit = is_lvalue::value ? 
LvalueBit : 0, Flags0 = MatrixTypeNestedPlain::Flags & ~(LvalueBit | NestByRefBit), Flags1 = Flags0 | FlagsLvalueBit, Flags = Flags1 ^ RowMajorBit, CoeffReadCost = MatrixTypeNestedPlain::CoeffReadCost, InnerStrideAtCompileTime = inner_stride_at_compile_time::ret, OuterStrideAtCompileTime = outer_stride_at_compile_time::ret }; }; } template class TransposeImpl; template class Transpose : public TransposeImpl::StorageKind> { public: typedef typename TransposeImpl::StorageKind>::Base Base; EIGEN_GENERIC_PUBLIC_INTERFACE(Transpose) inline Transpose(MatrixType& a_matrix) : m_matrix(a_matrix) {} EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Transpose) inline Index rows() const { return m_matrix.cols(); } inline Index cols() const { return m_matrix.rows(); } /** \returns the nested expression */ const typename internal::remove_all::type& nestedExpression() const { return m_matrix; } /** \returns the nested expression */ typename internal::remove_all::type& nestedExpression() { return m_matrix.const_cast_derived(); } protected: typename MatrixType::Nested m_matrix; }; namespace internal { template::ret> struct TransposeImpl_base { typedef typename dense_xpr_base >::type type; }; template struct TransposeImpl_base { typedef typename dense_xpr_base >::type type; }; } // end namespace internal template class TransposeImpl : public internal::TransposeImpl_base::type { public: typedef typename internal::TransposeImpl_base::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(Transpose) EIGEN_INHERIT_ASSIGNMENT_OPERATORS(TransposeImpl) inline Index innerStride() const { return derived().nestedExpression().innerStride(); } inline Index outerStride() const { return derived().nestedExpression().outerStride(); } typedef typename internal::conditional< internal::is_lvalue::value, Scalar, const Scalar >::type ScalarWithConstIfNotLvalue; inline ScalarWithConstIfNotLvalue* data() { return derived().nestedExpression().data(); } inline const Scalar* data() const { return derived().nestedExpression().data(); } inline ScalarWithConstIfNotLvalue& coeffRef(Index rowId, Index colId) { EIGEN_STATIC_ASSERT_LVALUE(MatrixType) return derived().nestedExpression().const_cast_derived().coeffRef(colId, rowId); } inline ScalarWithConstIfNotLvalue& coeffRef(Index index) { EIGEN_STATIC_ASSERT_LVALUE(MatrixType) return derived().nestedExpression().const_cast_derived().coeffRef(index); } inline const Scalar& coeffRef(Index rowId, Index colId) const { return derived().nestedExpression().coeffRef(colId, rowId); } inline const Scalar& coeffRef(Index index) const { return derived().nestedExpression().coeffRef(index); } inline CoeffReturnType coeff(Index rowId, Index colId) const { return derived().nestedExpression().coeff(colId, rowId); } inline CoeffReturnType coeff(Index index) const { return derived().nestedExpression().coeff(index); } template inline const PacketScalar packet(Index rowId, Index colId) const { return derived().nestedExpression().template packet(colId, rowId); } template inline void writePacket(Index rowId, Index colId, const PacketScalar& x) { derived().nestedExpression().const_cast_derived().template writePacket(colId, rowId, x); } template inline const PacketScalar packet(Index index) const { return derived().nestedExpression().template packet(index); } template inline void writePacket(Index index, const PacketScalar& x) { derived().nestedExpression().const_cast_derived().template writePacket(index, x); } }; /** \returns an expression of the transpose of *this. 
* * Example: \include MatrixBase_transpose.cpp * Output: \verbinclude MatrixBase_transpose.out * * \warning If you want to replace a matrix by its own transpose, do \b NOT do this: * \code * m = m.transpose(); // bug!!! caused by aliasing effect * \endcode * Instead, use the transposeInPlace() method: * \code * m.transposeInPlace(); * \endcode * which gives Eigen good opportunities for optimization, or alternatively you can also do: * \code * m = m.transpose().eval(); * \endcode * * \sa transposeInPlace(), adjoint() */ template inline Transpose DenseBase::transpose() { return derived(); } /** This is the const version of transpose(). * * Make sure you read the warning for transpose() ! * * \sa transposeInPlace(), adjoint() */ template inline typename DenseBase::ConstTransposeReturnType DenseBase::transpose() const { return ConstTransposeReturnType(derived()); } /** \returns an expression of the adjoint (i.e. conjugate transpose) of *this. * * Example: \include MatrixBase_adjoint.cpp * Output: \verbinclude MatrixBase_adjoint.out * * \warning If you want to replace a matrix by its own adjoint, do \b NOT do this: * \code * m = m.adjoint(); // bug!!! caused by aliasing effect * \endcode * Instead, use the adjointInPlace() method: * \code * m.adjointInPlace(); * \endcode * which gives Eigen good opportunities for optimization, or alternatively you can also do: * \code * m = m.adjoint().eval(); * \endcode * * \sa adjointInPlace(), transpose(), conjugate(), class Transpose, class internal::scalar_conjugate_op */ template inline const typename MatrixBase::AdjointReturnType MatrixBase::adjoint() const { return this->transpose(); // in the complex case, the .conjugate() is be implicit here // due to implicit conversion to return type } /*************************************************************************** * "in place" transpose implementation ***************************************************************************/ namespace internal { template struct inplace_transpose_selector; template struct inplace_transpose_selector { // square matrix static void run(MatrixType& m) { m.matrix().template triangularView().swap(m.matrix().transpose()); } }; template struct inplace_transpose_selector { // non square matrix static void run(MatrixType& m) { if (m.rows()==m.cols()) m.matrix().template triangularView().swap(m.matrix().transpose()); else m = m.transpose().eval(); } }; } // end namespace internal /** This is the "in place" version of transpose(): it replaces \c *this by its own transpose. * Thus, doing * \code * m.transposeInPlace(); * \endcode * has the same effect on m as doing * \code * m = m.transpose().eval(); * \endcode * and is faster and also safer because in the latter line of code, forgetting the eval() results * in a bug caused by \ref TopicAliasing "aliasing". * * Notice however that this method is only useful if you want to replace a matrix by its own transpose. * If you just need the transpose of a matrix, use transpose(). * * \note if the matrix is not square, then \c *this must be a resizable matrix. * This excludes (non-square) fixed-size matrices, block-expressions and maps. 
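*
* A minimal sketch of that restriction, with illustrative matrix names:
* \code
* Eigen::MatrixXd a(2,3);
* a.transposeInPlace();            // fine: a is resizable and becomes 3x2
* Eigen::Matrix<double,2,3> b;
* // b.transposeInPlace();         // would trigger the eigen_assert in transposeInPlace():
* //                               // fixed-size and non-square
* \endcode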
* * \sa transpose(), adjoint(), adjointInPlace() */ template inline void DenseBase::transposeInPlace() { eigen_assert((rows() == cols() || (RowsAtCompileTime == Dynamic && ColsAtCompileTime == Dynamic)) && "transposeInPlace() called on a non-square non-resizable matrix"); internal::inplace_transpose_selector::run(derived()); } /*************************************************************************** * "in place" adjoint implementation ***************************************************************************/ /** This is the "in place" version of adjoint(): it replaces \c *this by its own transpose. * Thus, doing * \code * m.adjointInPlace(); * \endcode * has the same effect on m as doing * \code * m = m.adjoint().eval(); * \endcode * and is faster and also safer because in the latter line of code, forgetting the eval() results * in a bug caused by aliasing. * * Notice however that this method is only useful if you want to replace a matrix by its own adjoint. * If you just need the adjoint of a matrix, use adjoint(). * * \note if the matrix is not square, then \c *this must be a resizable matrix. * This excludes (non-square) fixed-size matrices, block-expressions and maps. * * \sa transpose(), adjoint(), transposeInPlace() */ template inline void MatrixBase::adjointInPlace() { derived() = adjoint().eval(); } #ifndef EIGEN_NO_DEBUG // The following is to detect aliasing problems in most common cases. namespace internal { template struct blas_traits > : blas_traits { typedef SelfCwiseBinaryOp XprType; static inline const XprType extract(const XprType& x) { return x; } }; template struct check_transpose_aliasing_compile_time_selector { enum { ret = bool(blas_traits::IsTransposed) != DestIsTransposed }; }; template struct check_transpose_aliasing_compile_time_selector > { enum { ret = bool(blas_traits::IsTransposed) != DestIsTransposed || bool(blas_traits::IsTransposed) != DestIsTransposed }; }; template struct check_transpose_aliasing_run_time_selector { static bool run(const Scalar* dest, const OtherDerived& src) { return (bool(blas_traits::IsTransposed) != DestIsTransposed) && (dest!=0 && dest==(const Scalar*)extract_data(src)); } }; template struct check_transpose_aliasing_run_time_selector > { static bool run(const Scalar* dest, const CwiseBinaryOp& src) { return ((blas_traits::IsTransposed != DestIsTransposed) && (dest!=0 && dest==(const Scalar*)extract_data(src.lhs()))) || ((blas_traits::IsTransposed != DestIsTransposed) && (dest!=0 && dest==(const Scalar*)extract_data(src.rhs()))); } }; // the following selector, checkTransposeAliasing_impl, based on MightHaveTransposeAliasing, // is because when the condition controlling the assert is known at compile time, ICC emits a warning. // This is actually a good warning: in expressions that don't have any transposing, the condition is // known at compile time to be false, and using that, we can avoid generating the code of the assert again // and again for all these expressions that don't need it. 
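// A minimal sketch of the kind of mistake this run-time check (active unless EIGEN_NO_DEBUG is
// defined) is meant to catch; the matrix name is purely illustrative:
//   Eigen::MatrixXd a(3,3);
//   a = a.transpose();             // aliasing: fires the assertion below in debug builds
//   a = a.transpose().eval();      // fine: evaluates the right-hand side into a temporary first
//   a.transposeInPlace();          // fine: dedicated in-place algorithm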
template::IsTransposed,OtherDerived>::ret > struct checkTransposeAliasing_impl { static void run(const Derived& dst, const OtherDerived& other) { eigen_assert((!check_transpose_aliasing_run_time_selector ::IsTransposed,OtherDerived> ::run(extract_data(dst), other)) && "aliasing detected during transposition, use transposeInPlace() " "or evaluate the rhs into a temporary using .eval()"); } }; template struct checkTransposeAliasing_impl { static void run(const Derived&, const OtherDerived&) { } }; } // end namespace internal template template void DenseBase::checkTransposeAliasing(const OtherDerived& other) const { internal::checkTransposeAliasing_impl::run(derived(), other); } #endif } // end namespace Eigen #endif // EIGEN_TRANSPOSE_H RcppEigen/inst/include/Eigen/src/Core/NumTraits.h0000644000176200001440000001433012253717461021344 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2010 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_NUMTRAITS_H #define EIGEN_NUMTRAITS_H namespace Eigen { /** \class NumTraits * \ingroup Core_Module * * \brief Holds information about the various numeric (i.e. scalar) types allowed by Eigen. * * \param T the numeric type at hand * * This class stores enums, typedefs and static methods giving information about a numeric type. * * The provided data consists of: * \li A typedef \a Real, giving the "real part" type of \a T. If \a T is already real, * then \a Real is just a typedef to \a T. If \a T is \c std::complex then \a Real * is a typedef to \a U. * \li A typedef \a NonInteger, giving the type that should be used for operations producing non-integral values, * such as quotients, square roots, etc. If \a T is a floating-point type, then this typedef just gives * \a T again. Note however that many Eigen functions such as internal::sqrt simply refuse to * take integers. Outside of a few cases, Eigen doesn't do automatic type promotion. Thus, this typedef is * only intended as a helper for code that needs to explicitly promote types. * \li A typedef \a Nested giving the type to use to nest a value inside of the expression tree. If you don't know what * this means, just use \a T here. * \li An enum value \a IsComplex. It is equal to 1 if \a T is a \c std::complex * type, and to 0 otherwise. * \li An enum value \a IsInteger. It is equal to \c 1 if \a T is an integer type such as \c int, * and to \c 0 otherwise. * \li Enum values ReadCost, AddCost and MulCost representing a rough estimate of the number of CPU cycles needed * to by move / add / mul instructions respectively, assuming the data is already stored in CPU registers. * Stay vague here. No need to do architecture-specific stuff. * \li An enum value \a IsSigned. It is equal to \c 1 if \a T is a signed type and to 0 if \a T is unsigned. * \li An enum value \a RequireInitialization. It is equal to \c 1 if the constructor of the numeric type \a T must * be called, and to 0 if it is safe not to call it. Default is 0 if \a T is an arithmetic type, and 1 otherwise. * \li An epsilon() function which, unlike std::numeric_limits::epsilon(), returns a \a Real instead of a \a T. * \li A dummy_precision() function returning a weak epsilon value. It is mainly used as a default * value by the fuzzy comparison operators. 
* \li highest() and lowest() functions returning the highest and lowest possible values respectively. */ template struct GenericNumTraits { enum { IsInteger = std::numeric_limits::is_integer, IsSigned = std::numeric_limits::is_signed, IsComplex = 0, RequireInitialization = internal::is_arithmetic::value ? 0 : 1, ReadCost = 1, AddCost = 1, MulCost = 1 }; typedef T Real; typedef typename internal::conditional< IsInteger, typename internal::conditional::type, T >::type NonInteger; typedef T Nested; static inline Real epsilon() { return std::numeric_limits::epsilon(); } static inline Real dummy_precision() { // make sure to override this for floating-point types return Real(0); } static inline T highest() { return (std::numeric_limits::max)(); } static inline T lowest() { return IsInteger ? (std::numeric_limits::min)() : (-(std::numeric_limits::max)()); } #ifdef EIGEN2_SUPPORT enum { HasFloatingPoint = !IsInteger }; typedef NonInteger FloatingPoint; #endif }; template struct NumTraits : GenericNumTraits {}; template<> struct NumTraits : GenericNumTraits { static inline float dummy_precision() { return 1e-5f; } }; template<> struct NumTraits : GenericNumTraits { static inline double dummy_precision() { return 1e-12; } }; template<> struct NumTraits : GenericNumTraits { static inline long double dummy_precision() { return 1e-15l; } }; template struct NumTraits > : GenericNumTraits > { typedef _Real Real; enum { IsComplex = 1, RequireInitialization = NumTraits<_Real>::RequireInitialization, ReadCost = 2 * NumTraits<_Real>::ReadCost, AddCost = 2 * NumTraits::AddCost, MulCost = 4 * NumTraits::MulCost + 2 * NumTraits::AddCost }; static inline Real epsilon() { return NumTraits::epsilon(); } static inline Real dummy_precision() { return NumTraits::dummy_precision(); } }; template struct NumTraits > { typedef Array ArrayType; typedef typename NumTraits::Real RealScalar; typedef Array Real; typedef typename NumTraits::NonInteger NonIntegerScalar; typedef Array NonInteger; typedef ArrayType & Nested; enum { IsComplex = NumTraits::IsComplex, IsInteger = NumTraits::IsInteger, IsSigned = NumTraits::IsSigned, RequireInitialization = 1, ReadCost = ArrayType::SizeAtCompileTime==Dynamic ? Dynamic : ArrayType::SizeAtCompileTime * NumTraits::ReadCost, AddCost = ArrayType::SizeAtCompileTime==Dynamic ? Dynamic : ArrayType::SizeAtCompileTime * NumTraits::AddCost, MulCost = ArrayType::SizeAtCompileTime==Dynamic ? Dynamic : ArrayType::SizeAtCompileTime * NumTraits::MulCost }; static inline RealScalar epsilon() { return NumTraits::epsilon(); } static inline RealScalar dummy_precision() { return NumTraits::dummy_precision(); } }; } // end namespace Eigen #endif // EIGEN_NUMTRAITS_H RcppEigen/inst/include/Eigen/src/Core/ReturnByValue.h0000644000176200001440000000666712550462273022201 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009-2010 Gael Guennebaud // Copyright (C) 2009-2010 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_RETURNBYVALUE_H #define EIGEN_RETURNBYVALUE_H namespace Eigen { /** \class ReturnByValue * \ingroup Core_Module * */ namespace internal { template struct traits > : public traits::ReturnType> { enum { // We're disabling the DirectAccess because e.g. 
the constructor of // the Block-with-DirectAccess expression requires to have a coeffRef method. // Also, we don't want to have to implement the stride stuff. Flags = (traits::ReturnType>::Flags | EvalBeforeNestingBit) & ~DirectAccessBit }; }; /* The ReturnByValue object doesn't even have a coeff() method. * So the only way that nesting it in an expression can work, is by evaluating it into a plain matrix. * So internal::nested always gives the plain return matrix type. * * FIXME: I don't understand why we need this specialization: isn't this taken care of by the EvalBeforeNestingBit ?? */ template struct nested, n, PlainObject> { typedef typename traits::ReturnType type; }; } // end namespace internal template class ReturnByValue : internal::no_assignment_operator, public internal::dense_xpr_base< ReturnByValue >::type { public: typedef typename internal::traits::ReturnType ReturnType; typedef typename internal::dense_xpr_base::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(ReturnByValue) template inline void evalTo(Dest& dst) const { static_cast(this)->evalTo(dst); } inline Index rows() const { return static_cast(this)->rows(); } inline Index cols() const { return static_cast(this)->cols(); } #ifndef EIGEN_PARSED_BY_DOXYGEN #define Unusable YOU_ARE_TRYING_TO_ACCESS_A_SINGLE_COEFFICIENT_IN_A_SPECIAL_EXPRESSION_WHERE_THAT_IS_NOT_ALLOWED_BECAUSE_THAT_WOULD_BE_INEFFICIENT class Unusable{ Unusable(const Unusable&) {} Unusable& operator=(const Unusable&) {return *this;} }; const Unusable& coeff(Index) const { return *reinterpret_cast(this); } const Unusable& coeff(Index,Index) const { return *reinterpret_cast(this); } Unusable& coeffRef(Index) { return *reinterpret_cast(this); } Unusable& coeffRef(Index,Index) { return *reinterpret_cast(this); } template Unusable& packet(Index) const; template Unusable& packet(Index, Index) const; #endif }; template template Derived& DenseBase::operator=(const ReturnByValue& other) { other.evalTo(derived()); return derived(); } template template Derived& DenseBase::lazyAssign(const ReturnByValue& other) { other.evalTo(derived()); return derived(); } } // end namespace Eigen #endif // EIGEN_RETURNBYVALUE_H RcppEigen/inst/include/Eigen/src/Core/Fuzzy.h0000644000176200001440000001272412253717461020552 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2008 Benoit Jacob // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_FUZZY_H #define EIGEN_FUZZY_H namespace Eigen { namespace internal { template::IsInteger> struct isApprox_selector { static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar& prec) { using std::min; typename internal::nested::type nested(x); typename internal::nested::type otherNested(y); return (nested - otherNested).cwiseAbs2().sum() <= prec * prec * (min)(nested.cwiseAbs2().sum(), otherNested.cwiseAbs2().sum()); } }; template struct isApprox_selector { static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar&) { return x.matrix() == y.matrix(); } }; template::IsInteger> struct isMuchSmallerThan_object_selector { static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar& prec) { return x.cwiseAbs2().sum() <= numext::abs2(prec) * y.cwiseAbs2().sum(); } }; template struct isMuchSmallerThan_object_selector { static bool run(const Derived& x, const OtherDerived&, const typename Derived::RealScalar&) { return x.matrix() == Derived::Zero(x.rows(), x.cols()).matrix(); } }; template::IsInteger> struct isMuchSmallerThan_scalar_selector { static bool run(const Derived& x, const typename Derived::RealScalar& y, const typename Derived::RealScalar& prec) { return x.cwiseAbs2().sum() <= numext::abs2(prec * y); } }; template struct isMuchSmallerThan_scalar_selector { static bool run(const Derived& x, const typename Derived::RealScalar&, const typename Derived::RealScalar&) { return x.matrix() == Derived::Zero(x.rows(), x.cols()).matrix(); } }; } // end namespace internal /** \returns \c true if \c *this is approximately equal to \a other, within the precision * determined by \a prec. * * \note The fuzzy compares are done multiplicatively. Two vectors \f$ v \f$ and \f$ w \f$ * are considered to be approximately equal within precision \f$ p \f$ if * \f[ \Vert v - w \Vert \leqslant p\,\min(\Vert v\Vert, \Vert w\Vert). \f] * For matrices, the comparison is done using the Hilbert-Schmidt norm (aka Frobenius norm * L2 norm). * * \note Because of the multiplicativeness of this comparison, one can't use this function * to check whether \c *this is approximately equal to the zero matrix or vector. * Indeed, \c isApprox(zero) returns false unless \c *this itself is exactly the zero matrix * or vector. If you want to test whether \c *this is zero, use internal::isMuchSmallerThan(const * RealScalar&, RealScalar) instead. * * \sa internal::isMuchSmallerThan(const RealScalar&, RealScalar) const */ template template bool DenseBase::isApprox( const DenseBase& other, const RealScalar& prec ) const { return internal::isApprox_selector::run(derived(), other.derived(), prec); } /** \returns \c true if the norm of \c *this is much smaller than \a other, * within the precision determined by \a prec. * * \note The fuzzy compares are done multiplicatively. A vector \f$ v \f$ is * considered to be much smaller than \f$ x \f$ within precision \f$ p \f$ if * \f[ \Vert v \Vert \leqslant p\,\vert x\vert. \f] * * For matrices, the comparison is done using the Hilbert-Schmidt norm. For this reason, * the value of the reference scalar \a other should come from the Hilbert-Schmidt norm * of a reference matrix of same dimensions. 
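*
* A minimal sketch, assuming a residual is compared against the norm of a reference vector:
* \code
* Eigen::VectorXd exact  = Eigen::VectorXd::LinSpaced(100, 0.0, 1.0);
* Eigen::VectorXd approx = exact + Eigen::VectorXd::Constant(100, 1e-14);
* bool negligible = (approx - exact).isMuchSmallerThan(exact.norm());  // true here
* \endcode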
* * \sa isApprox(), isMuchSmallerThan(const DenseBase&, RealScalar) const */ template bool DenseBase::isMuchSmallerThan( const typename NumTraits::Real& other, const RealScalar& prec ) const { return internal::isMuchSmallerThan_scalar_selector::run(derived(), other, prec); } /** \returns \c true if the norm of \c *this is much smaller than the norm of \a other, * within the precision determined by \a prec. * * \note The fuzzy compares are done multiplicatively. A vector \f$ v \f$ is * considered to be much smaller than a vector \f$ w \f$ within precision \f$ p \f$ if * \f[ \Vert v \Vert \leqslant p\,\Vert w\Vert. \f] * For matrices, the comparison is done using the Hilbert-Schmidt norm. * * \sa isApprox(), isMuchSmallerThan(const RealScalar&, RealScalar) const */ template template bool DenseBase::isMuchSmallerThan( const DenseBase& other, const RealScalar& prec ) const { return internal::isMuchSmallerThan_object_selector::run(derived(), other.derived(), prec); } } // end namespace Eigen #endif // EIGEN_FUZZY_H RcppEigen/inst/include/Eigen/src/Core/CwiseNullaryOp.h0000644000176200001440000007132712253717461022347 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CWISE_NULLARY_OP_H #define EIGEN_CWISE_NULLARY_OP_H namespace Eigen { /** \class CwiseNullaryOp * \ingroup Core_Module * * \brief Generic expression of a matrix where all coefficients are defined by a functor * * \param NullaryOp template functor implementing the operator * \param PlainObjectType the underlying plain matrix/array type * * This class represents an expression of a generic nullary operator. * It is the return type of the Ones(), Zero(), Constant(), Identity() and Random() methods, * and most of the time this is the only way it is used. * * However, if you want to write a function returning such an expression, you * will need to use this class. * * \sa class CwiseUnaryOp, class CwiseBinaryOp, DenseBase::NullaryExpr() */ namespace internal { template struct traits > : traits { enum { Flags = (traits::Flags & ( HereditaryBits | (functor_has_linear_access::ret ? LinearAccessBit : 0) | (functor_traits::PacketAccess ? PacketAccessBit : 0))) | (functor_traits::IsRepeatable ? 
0 : EvalBeforeNestingBit), CoeffReadCost = functor_traits::Cost }; }; } template class CwiseNullaryOp : internal::no_assignment_operator, public internal::dense_xpr_base< CwiseNullaryOp >::type { public: typedef typename internal::dense_xpr_base::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(CwiseNullaryOp) CwiseNullaryOp(Index nbRows, Index nbCols, const NullaryOp& func = NullaryOp()) : m_rows(nbRows), m_cols(nbCols), m_functor(func) { eigen_assert(nbRows >= 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == nbRows) && nbCols >= 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == nbCols)); } EIGEN_STRONG_INLINE Index rows() const { return m_rows.value(); } EIGEN_STRONG_INLINE Index cols() const { return m_cols.value(); } EIGEN_STRONG_INLINE const Scalar coeff(Index rowId, Index colId) const { return m_functor(rowId, colId); } template EIGEN_STRONG_INLINE PacketScalar packet(Index rowId, Index colId) const { return m_functor.packetOp(rowId, colId); } EIGEN_STRONG_INLINE const Scalar coeff(Index index) const { return m_functor(index); } template EIGEN_STRONG_INLINE PacketScalar packet(Index index) const { return m_functor.packetOp(index); } /** \returns the functor representing the nullary operation */ const NullaryOp& functor() const { return m_functor; } protected: const internal::variable_if_dynamic m_rows; const internal::variable_if_dynamic m_cols; const NullaryOp m_functor; }; /** \returns an expression of a matrix defined by a custom functor \a func * * The parameters \a rows and \a cols are the number of rows and of columns of * the returned matrix. Must be compatible with this MatrixBase type. * * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, * it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used * instead. * * The template parameter \a CustomNullaryOp is the type of the functor. * * \sa class CwiseNullaryOp */ template template EIGEN_STRONG_INLINE const CwiseNullaryOp DenseBase::NullaryExpr(Index rows, Index cols, const CustomNullaryOp& func) { return CwiseNullaryOp(rows, cols, func); } /** \returns an expression of a matrix defined by a custom functor \a func * * The parameter \a size is the size of the returned vector. * Must be compatible with this MatrixBase type. * * \only_for_vectors * * This variant is meant to be used for dynamic-size vector types. For fixed-size types, * it is redundant to pass \a size as argument, so Zero() should be used * instead. * * The template parameter \a CustomNullaryOp is the type of the functor. * * \sa class CwiseNullaryOp */ template template EIGEN_STRONG_INLINE const CwiseNullaryOp DenseBase::NullaryExpr(Index size, const CustomNullaryOp& func) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) if(RowsAtCompileTime == 1) return CwiseNullaryOp(1, size, func); else return CwiseNullaryOp(size, 1, func); } /** \returns an expression of a matrix defined by a custom functor \a func * * This variant is only for fixed-size DenseBase types. For dynamic-size types, you * need to use the variants taking size arguments. * * The template parameter \a CustomNullaryOp is the type of the functor. * * \sa class CwiseNullaryOp */ template template EIGEN_STRONG_INLINE const CwiseNullaryOp DenseBase::NullaryExpr(const CustomNullaryOp& func) { return CwiseNullaryOp(RowsAtCompileTime, ColsAtCompileTime, func); } /** \returns an expression of a constant matrix of value \a value * * The parameters \a nbRows and \a nbCols are the number of rows and of columns of * the returned matrix. 
Must be compatible with this DenseBase type. * * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, * it is redundant to pass \a nbRows and \a nbCols as arguments, so Zero() should be used * instead. * * The template parameter \a CustomNullaryOp is the type of the functor. * * \sa class CwiseNullaryOp */ template EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType DenseBase::Constant(Index nbRows, Index nbCols, const Scalar& value) { return DenseBase::NullaryExpr(nbRows, nbCols, internal::scalar_constant_op(value)); } /** \returns an expression of a constant matrix of value \a value * * The parameter \a size is the size of the returned vector. * Must be compatible with this DenseBase type. * * \only_for_vectors * * This variant is meant to be used for dynamic-size vector types. For fixed-size types, * it is redundant to pass \a size as argument, so Zero() should be used * instead. * * The template parameter \a CustomNullaryOp is the type of the functor. * * \sa class CwiseNullaryOp */ template EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType DenseBase::Constant(Index size, const Scalar& value) { return DenseBase::NullaryExpr(size, internal::scalar_constant_op(value)); } /** \returns an expression of a constant matrix of value \a value * * This variant is only for fixed-size DenseBase types. For dynamic-size types, you * need to use the variants taking size arguments. * * The template parameter \a CustomNullaryOp is the type of the functor. * * \sa class CwiseNullaryOp */ template EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType DenseBase::Constant(const Scalar& value) { EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) return DenseBase::NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_constant_op(value)); } /** * \brief Sets a linearly space vector. * * The function generates 'size' equally spaced values in the closed interval [low,high]. * This particular version of LinSpaced() uses sequential access, i.e. vector access is * assumed to be a(0), a(1), ..., a(size). This assumption allows for better vectorization * and yields faster code than the random access version. * * When size is set to 1, a vector of length 1 containing 'high' is returned. * * \only_for_vectors * * Example: \include DenseBase_LinSpaced_seq.cpp * Output: \verbinclude DenseBase_LinSpaced_seq.out * * \sa setLinSpaced(Index,const Scalar&,const Scalar&), LinSpaced(Index,Scalar,Scalar), CwiseNullaryOp */ template EIGEN_STRONG_INLINE const typename DenseBase::SequentialLinSpacedReturnType DenseBase::LinSpaced(Sequential_t, Index size, const Scalar& low, const Scalar& high) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return DenseBase::NullaryExpr(size, internal::linspaced_op(low,high,size)); } /** * \copydoc DenseBase::LinSpaced(Sequential_t, Index, const Scalar&, const Scalar&) * Special version for fixed size types which does not require the size parameter. */ template EIGEN_STRONG_INLINE const typename DenseBase::SequentialLinSpacedReturnType DenseBase::LinSpaced(Sequential_t, const Scalar& low, const Scalar& high) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) return DenseBase::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op(low,high,Derived::SizeAtCompileTime)); } /** * \brief Sets a linearly space vector. * * The function generates 'size' equally spaced values in the closed interval [low,high]. * When size is set to 1, a vector of length 1 containing 'high' is returned. 
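*
* For instance (a small sketch, values written out for clarity):
* \code
* Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(5, 0.0, 1.0);
* // v is (0, 0.25, 0.5, 0.75, 1)
* \endcode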
* * \only_for_vectors * * Example: \include DenseBase_LinSpaced.cpp * Output: \verbinclude DenseBase_LinSpaced.out * * \sa setLinSpaced(Index,const Scalar&,const Scalar&), LinSpaced(Sequential_t,Index,const Scalar&,const Scalar&,Index), CwiseNullaryOp */ template EIGEN_STRONG_INLINE const typename DenseBase::RandomAccessLinSpacedReturnType DenseBase::LinSpaced(Index size, const Scalar& low, const Scalar& high) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return DenseBase::NullaryExpr(size, internal::linspaced_op(low,high,size)); } /** * \copydoc DenseBase::LinSpaced(Index, const Scalar&, const Scalar&) * Special version for fixed size types which does not require the size parameter. */ template EIGEN_STRONG_INLINE const typename DenseBase::RandomAccessLinSpacedReturnType DenseBase::LinSpaced(const Scalar& low, const Scalar& high) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) return DenseBase::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op(low,high,Derived::SizeAtCompileTime)); } /** \returns true if all coefficients in this matrix are approximately equal to \a val, to within precision \a prec */ template bool DenseBase::isApproxToConstant (const Scalar& val, const RealScalar& prec) const { for(Index j = 0; j < cols(); ++j) for(Index i = 0; i < rows(); ++i) if(!internal::isApprox(this->coeff(i, j), val, prec)) return false; return true; } /** This is just an alias for isApproxToConstant(). * * \returns true if all coefficients in this matrix are approximately equal to \a value, to within precision \a prec */ template bool DenseBase::isConstant (const Scalar& val, const RealScalar& prec) const { return isApproxToConstant(val, prec); } /** Alias for setConstant(): sets all coefficients in this expression to \a val. * * \sa setConstant(), Constant(), class CwiseNullaryOp */ template EIGEN_STRONG_INLINE void DenseBase::fill(const Scalar& val) { setConstant(val); } /** Sets all coefficients in this expression to \a value. * * \sa fill(), setConstant(Index,const Scalar&), setConstant(Index,Index,const Scalar&), setZero(), setOnes(), Constant(), class CwiseNullaryOp, setZero(), setOnes() */ template EIGEN_STRONG_INLINE Derived& DenseBase::setConstant(const Scalar& val) { return derived() = Constant(rows(), cols(), val); } /** Resizes to the given \a size, and sets all coefficients in this expression to the given \a value. * * \only_for_vectors * * Example: \include Matrix_setConstant_int.cpp * Output: \verbinclude Matrix_setConstant_int.out * * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,Index,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&) */ template EIGEN_STRONG_INLINE Derived& PlainObjectBase::setConstant(Index size, const Scalar& val) { resize(size); return setConstant(val); } /** Resizes to the given size, and sets all coefficients in this expression to the given \a value. * * \param nbRows the new number of rows * \param nbCols the new number of columns * \param val the value to which all coefficients are set * * Example: \include Matrix_setConstant_int_int.cpp * Output: \verbinclude Matrix_setConstant_int_int.out * * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&) */ template EIGEN_STRONG_INLINE Derived& PlainObjectBase::setConstant(Index nbRows, Index nbCols, const Scalar& val) { resize(nbRows, nbCols); return setConstant(val); } /** * \brief Sets a linearly space vector. 
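*
* A minimal in-place counterpart to LinSpaced(), assuming a resizable vector:
* \code
* Eigen::VectorXd v;
* v.setLinSpaced(5, 0.0, 1.0);   // effectively v = Eigen::VectorXd::LinSpaced(5, 0.0, 1.0)
* \endcode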
* * The function generates 'size' equally spaced values in the closed interval [low,high]. * When size is set to 1, a vector of length 1 containing 'high' is returned. * * \only_for_vectors * * Example: \include DenseBase_setLinSpaced.cpp * Output: \verbinclude DenseBase_setLinSpaced.out * * \sa CwiseNullaryOp */ template EIGEN_STRONG_INLINE Derived& DenseBase::setLinSpaced(Index newSize, const Scalar& low, const Scalar& high) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return derived() = Derived::NullaryExpr(newSize, internal::linspaced_op(low,high,newSize)); } /** * \brief Sets a linearly space vector. * * The function fill *this with equally spaced values in the closed interval [low,high]. * When size is set to 1, a vector of length 1 containing 'high' is returned. * * \only_for_vectors * * \sa setLinSpaced(Index, const Scalar&, const Scalar&), CwiseNullaryOp */ template EIGEN_STRONG_INLINE Derived& DenseBase::setLinSpaced(const Scalar& low, const Scalar& high) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return setLinSpaced(size(), low, high); } // zero: /** \returns an expression of a zero matrix. * * The parameters \a rows and \a cols are the number of rows and of columns of * the returned matrix. Must be compatible with this MatrixBase type. * * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, * it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used * instead. * * Example: \include MatrixBase_zero_int_int.cpp * Output: \verbinclude MatrixBase_zero_int_int.out * * \sa Zero(), Zero(Index) */ template EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType DenseBase::Zero(Index nbRows, Index nbCols) { return Constant(nbRows, nbCols, Scalar(0)); } /** \returns an expression of a zero vector. * * The parameter \a size is the size of the returned vector. * Must be compatible with this MatrixBase type. * * \only_for_vectors * * This variant is meant to be used for dynamic-size vector types. For fixed-size types, * it is redundant to pass \a size as argument, so Zero() should be used * instead. * * Example: \include MatrixBase_zero_int.cpp * Output: \verbinclude MatrixBase_zero_int.out * * \sa Zero(), Zero(Index,Index) */ template EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType DenseBase::Zero(Index size) { return Constant(size, Scalar(0)); } /** \returns an expression of a fixed-size zero matrix or vector. * * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you * need to use the variants taking size arguments. * * Example: \include MatrixBase_zero.cpp * Output: \verbinclude MatrixBase_zero.out * * \sa Zero(Index), Zero(Index,Index) */ template EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType DenseBase::Zero() { return Constant(Scalar(0)); } /** \returns true if *this is approximately equal to the zero matrix, * within the precision given by \a prec. * * Example: \include MatrixBase_isZero.cpp * Output: \verbinclude MatrixBase_isZero.out * * \sa class CwiseNullaryOp, Zero() */ template bool DenseBase::isZero(const RealScalar& prec) const { for(Index j = 0; j < cols(); ++j) for(Index i = 0; i < rows(); ++i) if(!internal::isMuchSmallerThan(this->coeff(i, j), static_cast(1), prec)) return false; return true; } /** Sets all coefficients in this expression to zero. 
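*
* A minimal sketch (the matrix name is illustrative):
* \code
* Eigen::Matrix3d m;
* m.setZero();                   // same effect as m = Eigen::Matrix3d::Zero()
* \endcode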
* * Example: \include MatrixBase_setZero.cpp * Output: \verbinclude MatrixBase_setZero.out * * \sa class CwiseNullaryOp, Zero() */ template EIGEN_STRONG_INLINE Derived& DenseBase::setZero() { return setConstant(Scalar(0)); } /** Resizes to the given \a size, and sets all coefficients in this expression to zero. * * \only_for_vectors * * Example: \include Matrix_setZero_int.cpp * Output: \verbinclude Matrix_setZero_int.out * * \sa DenseBase::setZero(), setZero(Index,Index), class CwiseNullaryOp, DenseBase::Zero() */ template EIGEN_STRONG_INLINE Derived& PlainObjectBase::setZero(Index newSize) { resize(newSize); return setConstant(Scalar(0)); } /** Resizes to the given size, and sets all coefficients in this expression to zero. * * \param nbRows the new number of rows * \param nbCols the new number of columns * * Example: \include Matrix_setZero_int_int.cpp * Output: \verbinclude Matrix_setZero_int_int.out * * \sa DenseBase::setZero(), setZero(Index), class CwiseNullaryOp, DenseBase::Zero() */ template EIGEN_STRONG_INLINE Derived& PlainObjectBase::setZero(Index nbRows, Index nbCols) { resize(nbRows, nbCols); return setConstant(Scalar(0)); } // ones: /** \returns an expression of a matrix where all coefficients equal one. * * The parameters \a nbRows and \a nbCols are the number of rows and of columns of * the returned matrix. Must be compatible with this MatrixBase type. * * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, * it is redundant to pass \a rows and \a cols as arguments, so Ones() should be used * instead. * * Example: \include MatrixBase_ones_int_int.cpp * Output: \verbinclude MatrixBase_ones_int_int.out * * \sa Ones(), Ones(Index), isOnes(), class Ones */ template EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType DenseBase::Ones(Index nbRows, Index nbCols) { return Constant(nbRows, nbCols, Scalar(1)); } /** \returns an expression of a vector where all coefficients equal one. * * The parameter \a newSize is the size of the returned vector. * Must be compatible with this MatrixBase type. * * \only_for_vectors * * This variant is meant to be used for dynamic-size vector types. For fixed-size types, * it is redundant to pass \a size as argument, so Ones() should be used * instead. * * Example: \include MatrixBase_ones_int.cpp * Output: \verbinclude MatrixBase_ones_int.out * * \sa Ones(), Ones(Index,Index), isOnes(), class Ones */ template EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType DenseBase::Ones(Index newSize) { return Constant(newSize, Scalar(1)); } /** \returns an expression of a fixed-size matrix or vector where all coefficients equal one. * * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you * need to use the variants taking size arguments. * * Example: \include MatrixBase_ones.cpp * Output: \verbinclude MatrixBase_ones.out * * \sa Ones(Index), Ones(Index,Index), isOnes(), class Ones */ template EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType DenseBase::Ones() { return Constant(Scalar(1)); } /** \returns true if *this is approximately equal to the matrix where all coefficients * are equal to 1, within the precision given by \a prec. * * Example: \include MatrixBase_isOnes.cpp * Output: \verbinclude MatrixBase_isOnes.out * * \sa class CwiseNullaryOp, Ones() */ template bool DenseBase::isOnes (const RealScalar& prec) const { return isApproxToConstant(Scalar(1), prec); } /** Sets all coefficients in this expression to one. 
* * Example: \include MatrixBase_setOnes.cpp * Output: \verbinclude MatrixBase_setOnes.out * * \sa class CwiseNullaryOp, Ones() */ template EIGEN_STRONG_INLINE Derived& DenseBase::setOnes() { return setConstant(Scalar(1)); } /** Resizes to the given \a newSize, and sets all coefficients in this expression to one. * * \only_for_vectors * * Example: \include Matrix_setOnes_int.cpp * Output: \verbinclude Matrix_setOnes_int.out * * \sa MatrixBase::setOnes(), setOnes(Index,Index), class CwiseNullaryOp, MatrixBase::Ones() */ template EIGEN_STRONG_INLINE Derived& PlainObjectBase::setOnes(Index newSize) { resize(newSize); return setConstant(Scalar(1)); } /** Resizes to the given size, and sets all coefficients in this expression to one. * * \param nbRows the new number of rows * \param nbCols the new number of columns * * Example: \include Matrix_setOnes_int_int.cpp * Output: \verbinclude Matrix_setOnes_int_int.out * * \sa MatrixBase::setOnes(), setOnes(Index), class CwiseNullaryOp, MatrixBase::Ones() */ template EIGEN_STRONG_INLINE Derived& PlainObjectBase::setOnes(Index nbRows, Index nbCols) { resize(nbRows, nbCols); return setConstant(Scalar(1)); } // Identity: /** \returns an expression of the identity matrix (not necessarily square). * * The parameters \a nbRows and \a nbCols are the number of rows and of columns of * the returned matrix. Must be compatible with this MatrixBase type. * * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, * it is redundant to pass \a rows and \a cols as arguments, so Identity() should be used * instead. * * Example: \include MatrixBase_identity_int_int.cpp * Output: \verbinclude MatrixBase_identity_int_int.out * * \sa Identity(), setIdentity(), isIdentity() */ template EIGEN_STRONG_INLINE const typename MatrixBase::IdentityReturnType MatrixBase::Identity(Index nbRows, Index nbCols) { return DenseBase::NullaryExpr(nbRows, nbCols, internal::scalar_identity_op()); } /** \returns an expression of the identity matrix (not necessarily square). * * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you * need to use the variant taking size arguments. * * Example: \include MatrixBase_identity.cpp * Output: \verbinclude MatrixBase_identity.out * * \sa Identity(Index,Index), setIdentity(), isIdentity() */ template EIGEN_STRONG_INLINE const typename MatrixBase::IdentityReturnType MatrixBase::Identity() { EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) return MatrixBase::NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_identity_op()); } /** \returns true if *this is approximately equal to the identity matrix * (not necessarily square), * within the precision given by \a prec. 
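*
* A minimal sketch, assuming a small fixed-size matrix:
* \code
* Eigen::Matrix3d m = Eigen::Matrix3d::Identity();
* bool ok = m.isIdentity();      // true
* m(0,2) = 1e-3;
* ok = m.isIdentity();           // false with the default precision
* \endcode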
* * Example: \include MatrixBase_isIdentity.cpp * Output: \verbinclude MatrixBase_isIdentity.out * * \sa class CwiseNullaryOp, Identity(), Identity(Index,Index), setIdentity() */ template bool MatrixBase::isIdentity (const RealScalar& prec) const { for(Index j = 0; j < cols(); ++j) { for(Index i = 0; i < rows(); ++i) { if(i == j) { if(!internal::isApprox(this->coeff(i, j), static_cast(1), prec)) return false; } else { if(!internal::isMuchSmallerThan(this->coeff(i, j), static_cast(1), prec)) return false; } } } return true; } namespace internal { template=16)> struct setIdentity_impl { static EIGEN_STRONG_INLINE Derived& run(Derived& m) { return m = Derived::Identity(m.rows(), m.cols()); } }; template struct setIdentity_impl { typedef typename Derived::Index Index; static EIGEN_STRONG_INLINE Derived& run(Derived& m) { m.setZero(); const Index size = (std::min)(m.rows(), m.cols()); for(Index i = 0; i < size; ++i) m.coeffRef(i,i) = typename Derived::Scalar(1); return m; } }; } // end namespace internal /** Writes the identity expression (not necessarily square) into *this. * * Example: \include MatrixBase_setIdentity.cpp * Output: \verbinclude MatrixBase_setIdentity.out * * \sa class CwiseNullaryOp, Identity(), Identity(Index,Index), isIdentity() */ template EIGEN_STRONG_INLINE Derived& MatrixBase::setIdentity() { return internal::setIdentity_impl::run(derived()); } /** \brief Resizes to the given size, and writes the identity expression (not necessarily square) into *this. * * \param nbRows the new number of rows * \param nbCols the new number of columns * * Example: \include Matrix_setIdentity_int_int.cpp * Output: \verbinclude Matrix_setIdentity_int_int.out * * \sa MatrixBase::setIdentity(), class CwiseNullaryOp, MatrixBase::Identity() */ template EIGEN_STRONG_INLINE Derived& MatrixBase::setIdentity(Index nbRows, Index nbCols) { derived().resize(nbRows, nbCols); return setIdentity(); } /** \returns an expression of the i-th unit (basis) vector. * * \only_for_vectors * * \sa MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() */ template EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::Unit(Index newSize, Index i) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return BasisReturnType(SquareMatrixType::Identity(newSize,newSize), i); } /** \returns an expression of the i-th unit (basis) vector. * * \only_for_vectors * * This variant is for fixed-size vector only. 
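*
* A minimal sketch with a fixed-size vector:
* \code
* Eigen::Vector3d e1 = Eigen::Vector3d::Unit(1);   // (0, 1, 0), same as Vector3d::UnitY()
* \endcode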
* * \sa MatrixBase::Unit(Index,Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() */ template EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::Unit(Index i) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return BasisReturnType(SquareMatrixType::Identity(),i); } /** \returns an expression of the X axis unit vector (1{,0}^*) * * \only_for_vectors * * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() */ template EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::UnitX() { return Derived::Unit(0); } /** \returns an expression of the Y axis unit vector (0,1{,0}^*) * * \only_for_vectors * * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() */ template EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::UnitY() { return Derived::Unit(1); } /** \returns an expression of the Z axis unit vector (0,0,1{,0}^*) * * \only_for_vectors * * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() */ template EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::UnitZ() { return Derived::Unit(2); } /** \returns an expression of the W axis unit vector (0,0,0,1) * * \only_for_vectors * * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() */ template EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::UnitW() { return Derived::Unit(3); } } // end namespace Eigen #endif // EIGEN_CWISE_NULLARY_OP_H RcppEigen/inst/include/Eigen/src/Core/ProductBase.h0000644000176200001440000002607012446006401021622 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009-2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_PRODUCTBASE_H #define EIGEN_PRODUCTBASE_H namespace Eigen { /** \class ProductBase * \ingroup Core_Module * */ namespace internal { template struct traits > { typedef MatrixXpr XprKind; typedef typename remove_all<_Lhs>::type Lhs; typedef typename remove_all<_Rhs>::type Rhs; typedef typename scalar_product_traits::ReturnType Scalar; typedef typename promote_storage_type::StorageKind, typename traits::StorageKind>::ret StorageKind; typedef typename promote_index_type::Index, typename traits::Index>::type Index; enum { RowsAtCompileTime = traits::RowsAtCompileTime, ColsAtCompileTime = traits::ColsAtCompileTime, MaxRowsAtCompileTime = traits::MaxRowsAtCompileTime, MaxColsAtCompileTime = traits::MaxColsAtCompileTime, Flags = (MaxRowsAtCompileTime==1 ? RowMajorBit : 0) | EvalBeforeNestingBit | EvalBeforeAssigningBit | NestByRefBit, // Note that EvalBeforeNestingBit and NestByRefBit // are not used in practice because nested is overloaded for products CoeffReadCost = 0 // FIXME why is it needed ? 
}; }; } #define EIGEN_PRODUCT_PUBLIC_INTERFACE(Derived) \ typedef ProductBase Base; \ EIGEN_DENSE_PUBLIC_INTERFACE(Derived) \ typedef typename Base::LhsNested LhsNested; \ typedef typename Base::_LhsNested _LhsNested; \ typedef typename Base::LhsBlasTraits LhsBlasTraits; \ typedef typename Base::ActualLhsType ActualLhsType; \ typedef typename Base::_ActualLhsType _ActualLhsType; \ typedef typename Base::RhsNested RhsNested; \ typedef typename Base::_RhsNested _RhsNested; \ typedef typename Base::RhsBlasTraits RhsBlasTraits; \ typedef typename Base::ActualRhsType ActualRhsType; \ typedef typename Base::_ActualRhsType _ActualRhsType; \ using Base::m_lhs; \ using Base::m_rhs; template class ProductBase : public MatrixBase { public: typedef MatrixBase Base; EIGEN_DENSE_PUBLIC_INTERFACE(ProductBase) typedef typename Lhs::Nested LhsNested; typedef typename internal::remove_all::type _LhsNested; typedef internal::blas_traits<_LhsNested> LhsBlasTraits; typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType; typedef typename internal::remove_all::type _ActualLhsType; typedef typename internal::traits::Scalar LhsScalar; typedef typename Rhs::Nested RhsNested; typedef typename internal::remove_all::type _RhsNested; typedef internal::blas_traits<_RhsNested> RhsBlasTraits; typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType; typedef typename internal::remove_all::type _ActualRhsType; typedef typename internal::traits::Scalar RhsScalar; // Diagonal of a product: no need to evaluate the arguments because they are going to be evaluated only once typedef CoeffBasedProduct FullyLazyCoeffBaseProductType; public: #ifndef EIGEN_NO_MALLOC typedef typename Base::PlainObject BasePlainObject; typedef Matrix DynPlainObject; typedef typename internal::conditional<(BasePlainObject::SizeAtCompileTime==Dynamic) || (BasePlainObject::SizeAtCompileTime*int(sizeof(Scalar)) < int(EIGEN_STACK_ALLOCATION_LIMIT)), BasePlainObject, DynPlainObject>::type PlainObject; #else typedef typename Base::PlainObject PlainObject; #endif ProductBase(const Lhs& a_lhs, const Rhs& a_rhs) : m_lhs(a_lhs), m_rhs(a_rhs) { eigen_assert(a_lhs.cols() == a_rhs.rows() && "invalid matrix product" && "if you wanted a coeff-wise or a dot product use the respective explicit functions"); } inline Index rows() const { return m_lhs.rows(); } inline Index cols() const { return m_rhs.cols(); } template inline void evalTo(Dest& dst) const { dst.setZero(); scaleAndAddTo(dst,Scalar(1)); } template inline void addTo(Dest& dst) const { scaleAndAddTo(dst,Scalar(1)); } template inline void subTo(Dest& dst) const { scaleAndAddTo(dst,Scalar(-1)); } template inline void scaleAndAddTo(Dest& dst, const Scalar& alpha) const { derived().scaleAndAddTo(dst,alpha); } const _LhsNested& lhs() const { return m_lhs; } const _RhsNested& rhs() const { return m_rhs; } // Implicit conversion to the nested type (trigger the evaluation of the product) operator const PlainObject& () const { m_result.resize(m_lhs.rows(), m_rhs.cols()); derived().evalTo(m_result); return m_result; } const Diagonal diagonal() const { return FullyLazyCoeffBaseProductType(m_lhs, m_rhs); } template const Diagonal diagonal() const { return FullyLazyCoeffBaseProductType(m_lhs, m_rhs); } const Diagonal diagonal(Index index) const { return FullyLazyCoeffBaseProductType(m_lhs, m_rhs).diagonal(index); } // restrict coeff accessors to 1x1 expressions. 
No need to care about mutators here since this isnt a Lvalue expression typename Base::CoeffReturnType coeff(Index row, Index col) const { #ifdef EIGEN2_SUPPORT return lhs().row(row).cwiseProduct(rhs().col(col).transpose()).sum(); #else EIGEN_STATIC_ASSERT_SIZE_1x1(Derived) eigen_assert(this->rows() == 1 && this->cols() == 1); Matrix result = *this; return result.coeff(row,col); #endif } typename Base::CoeffReturnType coeff(Index i) const { EIGEN_STATIC_ASSERT_SIZE_1x1(Derived) eigen_assert(this->rows() == 1 && this->cols() == 1); Matrix result = *this; return result.coeff(i); } const Scalar& coeffRef(Index row, Index col) const { EIGEN_STATIC_ASSERT_SIZE_1x1(Derived) eigen_assert(this->rows() == 1 && this->cols() == 1); return derived().coeffRef(row,col); } const Scalar& coeffRef(Index i) const { EIGEN_STATIC_ASSERT_SIZE_1x1(Derived) eigen_assert(this->rows() == 1 && this->cols() == 1); return derived().coeffRef(i); } protected: LhsNested m_lhs; RhsNested m_rhs; mutable PlainObject m_result; }; // here we need to overload the nested rule for products // such that the nested type is a const reference to a plain matrix namespace internal { template struct nested, N, PlainObject> { typedef typename GeneralProduct::PlainObject const& type; }; template struct nested, N, PlainObject> { typedef typename GeneralProduct::PlainObject const& type; }; } template class ScaledProduct; // Note that these two operator* functions are not defined as member // functions of ProductBase, because, otherwise we would have to // define all overloads defined in MatrixBase. Furthermore, Using // "using Base::operator*" would not work with MSVC. // // Also note that here we accept any compatible scalar types template const ScaledProduct operator*(const ProductBase& prod, const typename Derived::Scalar& x) { return ScaledProduct(prod.derived(), x); } template typename internal::enable_if::value, const ScaledProduct >::type operator*(const ProductBase& prod, const typename Derived::RealScalar& x) { return ScaledProduct(prod.derived(), x); } template const ScaledProduct operator*(const typename Derived::Scalar& x,const ProductBase& prod) { return ScaledProduct(prod.derived(), x); } template typename internal::enable_if::value, const ScaledProduct >::type operator*(const typename Derived::RealScalar& x,const ProductBase& prod) { return ScaledProduct(prod.derived(), x); } namespace internal { template struct traits > : traits, typename NestedProduct::_LhsNested, typename NestedProduct::_RhsNested> > { typedef typename traits::StorageKind StorageKind; }; } template class ScaledProduct : public ProductBase, typename NestedProduct::_LhsNested, typename NestedProduct::_RhsNested> { public: typedef ProductBase, typename NestedProduct::_LhsNested, typename NestedProduct::_RhsNested> Base; typedef typename Base::Scalar Scalar; typedef typename Base::PlainObject PlainObject; // EIGEN_PRODUCT_PUBLIC_INTERFACE(ScaledProduct) ScaledProduct(const NestedProduct& prod, const Scalar& x) : Base(prod.lhs(),prod.rhs()), m_prod(prod), m_alpha(x) {} template inline void evalTo(Dest& dst) const { dst.setZero(); scaleAndAddTo(dst, Scalar(1)); } template inline void addTo(Dest& dst) const { scaleAndAddTo(dst, Scalar(1)); } template inline void subTo(Dest& dst) const { scaleAndAddTo(dst, Scalar(-1)); } template inline void scaleAndAddTo(Dest& dst, const Scalar& a_alpha) const { m_prod.derived().scaleAndAddTo(dst,a_alpha * m_alpha); } const Scalar& alpha() const { return m_alpha; } protected: const NestedProduct& m_prod; Scalar m_alpha; }; /** 
\internal * Overloaded to perform an efficient C = (A*B).lazy() */ template template Derived& MatrixBase::lazyAssign(const ProductBase& other) { other.derived().evalTo(derived()); return derived(); } } // end namespace Eigen #endif // EIGEN_PRODUCTBASE_H RcppEigen/inst/include/Eigen/src/Core/MathFunctions.h0000644000176200001440000005270012647570720022205 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2010 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MATHFUNCTIONS_H #define EIGEN_MATHFUNCTIONS_H namespace Eigen { namespace internal { /** \internal \struct global_math_functions_filtering_base * * What it does: * Defines a typedef 'type' as follows: * - if type T has a member typedef Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl, then * global_math_functions_filtering_base::type is a typedef for it. * - otherwise, global_math_functions_filtering_base::type is a typedef for T. * * How it's used: * To allow to defined the global math functions (like sin...) in certain cases, like the Array expressions. * When you do sin(array1+array2), the object array1+array2 has a complicated expression type, all what you want to know * is that it inherits ArrayBase. So we implement a partial specialization of sin_impl for ArrayBase. * So we must make sure to use sin_impl > and not sin_impl, otherwise our partial specialization * won't be used. How does sin know that? That's exactly what global_math_functions_filtering_base tells it. * * How it's implemented: * SFINAE in the style of enable_if. Highly susceptible of breaking compilers. With GCC, it sure does work, but if you replace * the typename dummy by an integer template parameter, it doesn't work anymore! 
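 *
 * A rough illustration with hypothetical types (invented here for exposition only):
 * \code
 * struct Base {};                                                              // stand-in for e.g. ArrayBase<Derived>
 * struct Expr { typedef Base Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl; };
 * struct Plain {};                                                             // no such member typedef
 * // global_math_functions_filtering_base<Expr>::type  is Base
 * // global_math_functions_filtering_base<Plain>::type is Plain
 * \endcode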
*/ template struct global_math_functions_filtering_base { typedef T type; }; template struct always_void { typedef void type; }; template struct global_math_functions_filtering_base ::type > { typedef typename T::Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl type; }; #define EIGEN_MATHFUNC_IMPL(func, scalar) Eigen::internal::func##_impl::type> #define EIGEN_MATHFUNC_RETVAL(func, scalar) typename Eigen::internal::func##_retval::type>::type /**************************************************************************** * Implementation of real * ****************************************************************************/ template::IsComplex> struct real_default_impl { typedef typename NumTraits::Real RealScalar; static inline RealScalar run(const Scalar& x) { return x; } }; template struct real_default_impl { typedef typename NumTraits::Real RealScalar; static inline RealScalar run(const Scalar& x) { using std::real; return real(x); } }; template struct real_impl : real_default_impl {}; template struct real_retval { typedef typename NumTraits::Real type; }; /**************************************************************************** * Implementation of imag * ****************************************************************************/ template::IsComplex> struct imag_default_impl { typedef typename NumTraits::Real RealScalar; static inline RealScalar run(const Scalar&) { return RealScalar(0); } }; template struct imag_default_impl { typedef typename NumTraits::Real RealScalar; static inline RealScalar run(const Scalar& x) { using std::imag; return imag(x); } }; template struct imag_impl : imag_default_impl {}; template struct imag_retval { typedef typename NumTraits::Real type; }; /**************************************************************************** * Implementation of real_ref * ****************************************************************************/ template struct real_ref_impl { typedef typename NumTraits::Real RealScalar; static inline RealScalar& run(Scalar& x) { return reinterpret_cast(&x)[0]; } static inline const RealScalar& run(const Scalar& x) { return reinterpret_cast(&x)[0]; } }; template struct real_ref_retval { typedef typename NumTraits::Real & type; }; /**************************************************************************** * Implementation of imag_ref * ****************************************************************************/ template struct imag_ref_default_impl { typedef typename NumTraits::Real RealScalar; static inline RealScalar& run(Scalar& x) { return reinterpret_cast(&x)[1]; } static inline const RealScalar& run(const Scalar& x) { return reinterpret_cast(&x)[1]; } }; template struct imag_ref_default_impl { static inline Scalar run(Scalar&) { return Scalar(0); } static inline const Scalar run(const Scalar&) { return Scalar(0); } }; template struct imag_ref_impl : imag_ref_default_impl::IsComplex> {}; template struct imag_ref_retval { typedef typename NumTraits::Real & type; }; /**************************************************************************** * Implementation of conj * ****************************************************************************/ template::IsComplex> struct conj_impl { static inline Scalar run(const Scalar& x) { return x; } }; template struct conj_impl { static inline Scalar run(const Scalar& x) { using std::conj; return conj(x); } }; template struct conj_retval { typedef Scalar type; }; /**************************************************************************** * Implementation of abs2 * 
****************************************************************************/ template struct abs2_impl { typedef typename NumTraits::Real RealScalar; static inline RealScalar run(const Scalar& x) { return x*x; } }; template struct abs2_impl > { static inline RealScalar run(const std::complex& x) { return real(x)*real(x) + imag(x)*imag(x); } }; template struct abs2_retval { typedef typename NumTraits::Real type; }; /**************************************************************************** * Implementation of norm1 * ****************************************************************************/ template struct norm1_default_impl { typedef typename NumTraits::Real RealScalar; static inline RealScalar run(const Scalar& x) { using std::abs; return abs(real(x)) + abs(imag(x)); } }; template struct norm1_default_impl { static inline Scalar run(const Scalar& x) { using std::abs; return abs(x); } }; template struct norm1_impl : norm1_default_impl::IsComplex> {}; template struct norm1_retval { typedef typename NumTraits::Real type; }; /**************************************************************************** * Implementation of hypot * ****************************************************************************/ template struct hypot_impl { typedef typename NumTraits::Real RealScalar; static inline RealScalar run(const Scalar& x, const Scalar& y) { using std::max; using std::min; using std::abs; using std::sqrt; RealScalar _x = abs(x); RealScalar _y = abs(y); RealScalar p = (max)(_x, _y); if(p==RealScalar(0)) return RealScalar(0); RealScalar q = (min)(_x, _y); RealScalar qp = q/p; return p * sqrt(RealScalar(1) + qp*qp); } }; template struct hypot_retval { typedef typename NumTraits::Real type; }; /**************************************************************************** * Implementation of cast * ****************************************************************************/ template struct cast_impl { static inline NewType run(const OldType& x) { return static_cast(x); } }; // here, for once, we're plainly returning NewType: we don't want cast to do weird things. 
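// (For instance, with the generic implementation below, internal::cast<double, int>(3.7)
//  simply performs static_cast<int>(3.7); specializations of cast_impl can override this
//  for types where a plain static_cast would not be appropriate.)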
template inline NewType cast(const OldType& x) { return cast_impl::run(x); } /**************************************************************************** * Implementation of atanh2 * ****************************************************************************/ template struct atanh2_default_impl { typedef Scalar retval; typedef typename NumTraits::Real RealScalar; static inline Scalar run(const Scalar& x, const Scalar& y) { using std::abs; using std::log; using std::sqrt; Scalar z = x / y; if (y == Scalar(0) || abs(z) > sqrt(NumTraits::epsilon())) return RealScalar(0.5) * log((y + x) / (y - x)); else return z + z*z*z / RealScalar(3); } }; template struct atanh2_default_impl { static inline Scalar run(const Scalar&, const Scalar&) { EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar) return Scalar(0); } }; template struct atanh2_impl : atanh2_default_impl::IsInteger> {}; template struct atanh2_retval { typedef Scalar type; }; /**************************************************************************** * Implementation of pow * ****************************************************************************/ template struct pow_default_impl { typedef Scalar retval; static inline Scalar run(const Scalar& x, const Scalar& y) { using std::pow; return pow(x, y); } }; template struct pow_default_impl { static inline Scalar run(Scalar x, Scalar y) { Scalar res(1); eigen_assert(!NumTraits::IsSigned || y >= 0); if(y & 1) res *= x; y >>= 1; while(y) { x *= x; if(y&1) res *= x; y >>= 1; } return res; } }; template struct pow_impl : pow_default_impl::IsInteger> {}; template struct pow_retval { typedef Scalar type; }; /**************************************************************************** * Implementation of random * ****************************************************************************/ template struct random_default_impl {}; template struct random_impl : random_default_impl::IsComplex, NumTraits::IsInteger> {}; template struct random_retval { typedef Scalar type; }; template inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random(const Scalar& x, const Scalar& y); template inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random(); template struct random_default_impl { static inline Scalar run(const Scalar& x, const Scalar& y) { return x + (y-x) * Scalar(std::rand()) / Scalar(RAND_MAX); } static inline Scalar run() { return run(Scalar(NumTraits::IsSigned ? -1 : 0), Scalar(1)); } }; enum { floor_log2_terminate, floor_log2_move_up, floor_log2_move_down, floor_log2_bogus }; template struct floor_log2_selector { enum { middle = (lower + upper) / 2, value = (upper <= lower + 1) ? int(floor_log2_terminate) : (n < (1 << middle)) ? int(floor_log2_move_down) : (n==0) ? int(floor_log2_bogus) : int(floor_log2_move_up) }; }; template::value> struct floor_log2 {}; template struct floor_log2 { enum { value = floor_log2::middle>::value }; }; template struct floor_log2 { enum { value = floor_log2::middle, upper>::value }; }; template struct floor_log2 { enum { value = (n >= ((unsigned int)(1) << (lower+1))) ? lower+1 : lower }; }; template struct floor_log2 { // no value, error at compile time }; template struct random_default_impl { typedef typename NumTraits::NonInteger NonInteger; static inline Scalar run(const Scalar& x, const Scalar& y) { return x + Scalar((NonInteger(y)-x+1) * std::rand() / (RAND_MAX + NonInteger(1))); } static inline Scalar run() { #ifdef EIGEN_MAKING_DOCS return run(Scalar(NumTraits::IsSigned ? 
-10 : 0), Scalar(10)); #else enum { rand_bits = floor_log2<(unsigned int)(RAND_MAX)+1>::value, scalar_bits = sizeof(Scalar) * CHAR_BIT, shift = EIGEN_PLAIN_ENUM_MAX(0, int(rand_bits) - int(scalar_bits)), offset = NumTraits::IsSigned ? (1 << (EIGEN_PLAIN_ENUM_MIN(rand_bits,scalar_bits)-1)) : 0 }; return Scalar((std::rand() >> shift) - offset); #endif } }; template struct random_default_impl { static inline Scalar run(const Scalar& x, const Scalar& y) { return Scalar(random(real(x), real(y)), random(imag(x), imag(y))); } static inline Scalar run() { typedef typename NumTraits::Real RealScalar; return Scalar(random(), random()); } }; template inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random(const Scalar& x, const Scalar& y) { return EIGEN_MATHFUNC_IMPL(random, Scalar)::run(x, y); } template inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random() { return EIGEN_MATHFUNC_IMPL(random, Scalar)::run(); } } // end namespace internal /**************************************************************************** * Generic math function * ****************************************************************************/ namespace numext { template inline EIGEN_MATHFUNC_RETVAL(real, Scalar) real(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(real, Scalar)::run(x); } template inline typename internal::add_const_on_value_type< EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) >::type real_ref(const Scalar& x) { return internal::real_ref_impl::run(x); } template inline EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) real_ref(Scalar& x) { return EIGEN_MATHFUNC_IMPL(real_ref, Scalar)::run(x); } template inline EIGEN_MATHFUNC_RETVAL(imag, Scalar) imag(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(imag, Scalar)::run(x); } template inline typename internal::add_const_on_value_type< EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) >::type imag_ref(const Scalar& x) { return internal::imag_ref_impl::run(x); } template inline EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) imag_ref(Scalar& x) { return EIGEN_MATHFUNC_IMPL(imag_ref, Scalar)::run(x); } template inline EIGEN_MATHFUNC_RETVAL(conj, Scalar) conj(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(conj, Scalar)::run(x); } template inline EIGEN_MATHFUNC_RETVAL(abs2, Scalar) abs2(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(abs2, Scalar)::run(x); } template inline EIGEN_MATHFUNC_RETVAL(norm1, Scalar) norm1(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(norm1, Scalar)::run(x); } template inline EIGEN_MATHFUNC_RETVAL(hypot, Scalar) hypot(const Scalar& x, const Scalar& y) { return EIGEN_MATHFUNC_IMPL(hypot, Scalar)::run(x, y); } template inline EIGEN_MATHFUNC_RETVAL(atanh2, Scalar) atanh2(const Scalar& x, const Scalar& y) { return EIGEN_MATHFUNC_IMPL(atanh2, Scalar)::run(x, y); } template inline EIGEN_MATHFUNC_RETVAL(pow, Scalar) pow(const Scalar& x, const Scalar& y) { return EIGEN_MATHFUNC_IMPL(pow, Scalar)::run(x, y); } // std::isfinite is non standard, so let's define our own version, // even though it is not very efficient. 
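// (The check below simply requires NumTraits<T>::lowest() < x < NumTraits<T>::highest(),
//  which evaluates to false for NaN as well as for +/- infinity.)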
template bool (isfinite)(const T& x) { return x::highest() && x>NumTraits::lowest(); } } // end namespace numext namespace internal { /**************************************************************************** * Implementation of fuzzy comparisons * ****************************************************************************/ template struct scalar_fuzzy_default_impl {}; template struct scalar_fuzzy_default_impl { typedef typename NumTraits::Real RealScalar; template static inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, const RealScalar& prec) { using std::abs; return abs(x) <= abs(y) * prec; } static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec) { using std::min; using std::abs; return abs(x - y) <= (min)(abs(x), abs(y)) * prec; } static inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y, const RealScalar& prec) { return x <= y || isApprox(x, y, prec); } }; template struct scalar_fuzzy_default_impl { typedef typename NumTraits::Real RealScalar; template static inline bool isMuchSmallerThan(const Scalar& x, const Scalar&, const RealScalar&) { return x == Scalar(0); } static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar&) { return x == y; } static inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y, const RealScalar&) { return x <= y; } }; template struct scalar_fuzzy_default_impl { typedef typename NumTraits::Real RealScalar; template static inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, const RealScalar& prec) { return numext::abs2(x) <= numext::abs2(y) * prec * prec; } static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec) { using std::min; return numext::abs2(x - y) <= (min)(numext::abs2(x), numext::abs2(y)) * prec * prec; } }; template struct scalar_fuzzy_impl : scalar_fuzzy_default_impl::IsComplex, NumTraits::IsInteger> {}; template inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, typename NumTraits::Real precision = NumTraits::dummy_precision()) { return scalar_fuzzy_impl::template isMuchSmallerThan(x, y, precision); } template inline bool isApprox(const Scalar& x, const Scalar& y, typename NumTraits::Real precision = NumTraits::dummy_precision()) { return scalar_fuzzy_impl::isApprox(x, y, precision); } template inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y, typename NumTraits::Real precision = NumTraits::dummy_precision()) { return scalar_fuzzy_impl::isApproxOrLessThan(x, y, precision); } /****************************************** *** The special case of the bool type *** ******************************************/ template<> struct random_impl { static inline bool run() { return random(0,1)==0 ? false : true; } }; template<> struct scalar_fuzzy_impl { typedef bool RealScalar; template static inline bool isMuchSmallerThan(const bool& x, const bool&, const bool&) { return !x; } static inline bool isApprox(bool x, bool y, bool) { return x == y; } static inline bool isApproxOrLessThan(const bool& x, const bool& y, const bool&) { return (!x) || y; } }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_MATHFUNCTIONS_H RcppEigen/inst/include/Eigen/src/Core/ArrayWrapper.h0000644000176200001440000002030112446006401022015 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009-2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. 
If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ARRAYWRAPPER_H #define EIGEN_ARRAYWRAPPER_H namespace Eigen { /** \class ArrayWrapper * \ingroup Core_Module * * \brief Expression of a mathematical vector or matrix as an array object * * This class is the return type of MatrixBase::array(), and most of the time * this is the only way it is use. * * \sa MatrixBase::array(), class MatrixWrapper */ namespace internal { template struct traits > : public traits::type > { typedef ArrayXpr XprKind; // Let's remove NestByRefBit enum { Flags0 = traits::type >::Flags, Flags = Flags0 & ~NestByRefBit }; }; } template class ArrayWrapper : public ArrayBase > { public: typedef ArrayBase Base; EIGEN_DENSE_PUBLIC_INTERFACE(ArrayWrapper) EIGEN_INHERIT_ASSIGNMENT_OPERATORS(ArrayWrapper) typedef typename internal::conditional< internal::is_lvalue::value, Scalar, const Scalar >::type ScalarWithConstIfNotLvalue; typedef typename internal::nested::type NestedExpressionType; inline ArrayWrapper(ExpressionType& matrix) : m_expression(matrix) {} inline Index rows() const { return m_expression.rows(); } inline Index cols() const { return m_expression.cols(); } inline Index outerStride() const { return m_expression.outerStride(); } inline Index innerStride() const { return m_expression.innerStride(); } inline ScalarWithConstIfNotLvalue* data() { return m_expression.const_cast_derived().data(); } inline const Scalar* data() const { return m_expression.data(); } inline CoeffReturnType coeff(Index rowId, Index colId) const { return m_expression.coeff(rowId, colId); } inline Scalar& coeffRef(Index rowId, Index colId) { return m_expression.const_cast_derived().coeffRef(rowId, colId); } inline const Scalar& coeffRef(Index rowId, Index colId) const { return m_expression.const_cast_derived().coeffRef(rowId, colId); } inline CoeffReturnType coeff(Index index) const { return m_expression.coeff(index); } inline Scalar& coeffRef(Index index) { return m_expression.const_cast_derived().coeffRef(index); } inline const Scalar& coeffRef(Index index) const { return m_expression.const_cast_derived().coeffRef(index); } template inline const PacketScalar packet(Index rowId, Index colId) const { return m_expression.template packet(rowId, colId); } template inline void writePacket(Index rowId, Index colId, const PacketScalar& val) { m_expression.const_cast_derived().template writePacket(rowId, colId, val); } template inline const PacketScalar packet(Index index) const { return m_expression.template packet(index); } template inline void writePacket(Index index, const PacketScalar& val) { m_expression.const_cast_derived().template writePacket(index, val); } template inline void evalTo(Dest& dst) const { dst = m_expression; } const typename internal::remove_all::type& nestedExpression() const { return m_expression; } /** Forwards the resizing request to the nested expression * \sa DenseBase::resize(Index) */ void resize(Index newSize) { m_expression.const_cast_derived().resize(newSize); } /** Forwards the resizing request to the nested expression * \sa DenseBase::resize(Index,Index)*/ void resize(Index nbRows, Index nbCols) { m_expression.const_cast_derived().resize(nbRows,nbCols); } protected: NestedExpressionType m_expression; }; /** \class MatrixWrapper * \ingroup Core_Module * * \brief Expression of an array as a mathematical vector or matrix * * This class is the return type of ArrayBase::matrix(), and most of the time * this is the only way it is use. 
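 *
 * A brief usage sketch (illustrative only):
 * \code
 * Eigen::ArrayXXd a = Eigen::ArrayXXd::Random(3, 3);
 * Eigen::MatrixXd m = (a + 1.0).matrix() * Eigen::MatrixXd::Identity(3, 3); // matrix product applied to an array expression
 * \endcode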
* * \sa MatrixBase::matrix(), class ArrayWrapper */ namespace internal { template struct traits > : public traits::type > { typedef MatrixXpr XprKind; // Let's remove NestByRefBit enum { Flags0 = traits::type >::Flags, Flags = Flags0 & ~NestByRefBit }; }; } template class MatrixWrapper : public MatrixBase > { public: typedef MatrixBase > Base; EIGEN_DENSE_PUBLIC_INTERFACE(MatrixWrapper) EIGEN_INHERIT_ASSIGNMENT_OPERATORS(MatrixWrapper) typedef typename internal::conditional< internal::is_lvalue::value, Scalar, const Scalar >::type ScalarWithConstIfNotLvalue; typedef typename internal::nested::type NestedExpressionType; inline MatrixWrapper(ExpressionType& a_matrix) : m_expression(a_matrix) {} inline Index rows() const { return m_expression.rows(); } inline Index cols() const { return m_expression.cols(); } inline Index outerStride() const { return m_expression.outerStride(); } inline Index innerStride() const { return m_expression.innerStride(); } inline ScalarWithConstIfNotLvalue* data() { return m_expression.const_cast_derived().data(); } inline const Scalar* data() const { return m_expression.data(); } inline CoeffReturnType coeff(Index rowId, Index colId) const { return m_expression.coeff(rowId, colId); } inline Scalar& coeffRef(Index rowId, Index colId) { return m_expression.const_cast_derived().coeffRef(rowId, colId); } inline const Scalar& coeffRef(Index rowId, Index colId) const { return m_expression.derived().coeffRef(rowId, colId); } inline CoeffReturnType coeff(Index index) const { return m_expression.coeff(index); } inline Scalar& coeffRef(Index index) { return m_expression.const_cast_derived().coeffRef(index); } inline const Scalar& coeffRef(Index index) const { return m_expression.const_cast_derived().coeffRef(index); } template inline const PacketScalar packet(Index rowId, Index colId) const { return m_expression.template packet(rowId, colId); } template inline void writePacket(Index rowId, Index colId, const PacketScalar& val) { m_expression.const_cast_derived().template writePacket(rowId, colId, val); } template inline const PacketScalar packet(Index index) const { return m_expression.template packet(index); } template inline void writePacket(Index index, const PacketScalar& val) { m_expression.const_cast_derived().template writePacket(index, val); } const typename internal::remove_all::type& nestedExpression() const { return m_expression; } /** Forwards the resizing request to the nested expression * \sa DenseBase::resize(Index) */ void resize(Index newSize) { m_expression.const_cast_derived().resize(newSize); } /** Forwards the resizing request to the nested expression * \sa DenseBase::resize(Index,Index)*/ void resize(Index nbRows, Index nbCols) { m_expression.const_cast_derived().resize(nbRows,nbCols); } protected: NestedExpressionType m_expression; }; } // end namespace Eigen #endif // EIGEN_ARRAYWRAPPER_H RcppEigen/inst/include/Eigen/src/Core/GeneralProduct.h0000644000176200001440000006710112647570720022342 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2008 Benoit Jacob // Copyright (C) 2008-2011 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_GENERAL_PRODUCT_H #define EIGEN_GENERAL_PRODUCT_H namespace Eigen { /** \class GeneralProduct * \ingroup Core_Module * * \brief Expression of the product of two general matrices or vectors * * \param LhsNested the type used to store the left-hand side * \param RhsNested the type used to store the right-hand side * \param ProductMode the type of the product * * This class represents an expression of the product of two general matrices. * We call a general matrix, a dense matrix with full storage. For instance, * This excludes triangular, selfadjoint, and sparse matrices. * It is the return type of the operator* between general matrices. Its template * arguments are determined automatically by ProductReturnType. Therefore, * GeneralProduct should never be used direclty. To determine the result type of a * function which involves a matrix product, use ProductReturnType::Type. * * \sa ProductReturnType, MatrixBase::operator*(const MatrixBase&) */ template::value> class GeneralProduct; enum { Large = 2, Small = 3 }; namespace internal { template struct product_type_selector; template struct product_size_category { enum { is_large = MaxSize == Dynamic || Size >= EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD, value = is_large ? Large : Size == 1 ? 1 : Small }; }; template struct product_type { typedef typename remove_all::type _Lhs; typedef typename remove_all::type _Rhs; enum { MaxRows = _Lhs::MaxRowsAtCompileTime, Rows = _Lhs::RowsAtCompileTime, MaxCols = _Rhs::MaxColsAtCompileTime, Cols = _Rhs::ColsAtCompileTime, MaxDepth = EIGEN_SIZE_MIN_PREFER_FIXED(_Lhs::MaxColsAtCompileTime, _Rhs::MaxRowsAtCompileTime), Depth = EIGEN_SIZE_MIN_PREFER_FIXED(_Lhs::ColsAtCompileTime, _Rhs::RowsAtCompileTime), LargeThreshold = EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD }; // the splitting into different lines of code here, introducing the _select enums and the typedef below, // is to work around an internal compiler error with gcc 4.1 and 4.2. private: enum { rows_select = product_size_category::value, cols_select = product_size_category::value, depth_select = product_size_category::value }; typedef product_type_selector selector; public: enum { value = selector::ret }; #ifdef EIGEN_DEBUG_PRODUCT static void debug() { EIGEN_DEBUG_VAR(Rows); EIGEN_DEBUG_VAR(Cols); EIGEN_DEBUG_VAR(Depth); EIGEN_DEBUG_VAR(rows_select); EIGEN_DEBUG_VAR(cols_select); EIGEN_DEBUG_VAR(depth_select); EIGEN_DEBUG_VAR(value); } #endif }; /* The following allows to select the kind of product at compile time * based on the three dimensions of the product. * This is a compile time mapping from {1,Small,Large}^3 -> {product types} */ // FIXME I'm not sure the current mapping is the ideal one. 
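// (Each specialization below maps one (rows, cols, depth) size class onto a product kind:
//  InnerProduct, OuterProduct, CoeffBasedProductMode, LazyCoeffBasedProductMode,
//  GemvProduct or GemmProduct.)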
template struct product_type_selector { enum { ret = OuterProduct }; }; template struct product_type_selector<1, 1, Depth> { enum { ret = InnerProduct }; }; template<> struct product_type_selector<1, 1, 1> { enum { ret = InnerProduct }; }; template<> struct product_type_selector { enum { ret = CoeffBasedProductMode }; }; template<> struct product_type_selector<1, Small,Small> { enum { ret = CoeffBasedProductMode }; }; template<> struct product_type_selector { enum { ret = CoeffBasedProductMode }; }; template<> struct product_type_selector { enum { ret = LazyCoeffBasedProductMode }; }; template<> struct product_type_selector { enum { ret = LazyCoeffBasedProductMode }; }; template<> struct product_type_selector { enum { ret = LazyCoeffBasedProductMode }; }; template<> struct product_type_selector<1, Large,Small> { enum { ret = CoeffBasedProductMode }; }; template<> struct product_type_selector<1, Large,Large> { enum { ret = GemvProduct }; }; template<> struct product_type_selector<1, Small,Large> { enum { ret = CoeffBasedProductMode }; }; template<> struct product_type_selector { enum { ret = CoeffBasedProductMode }; }; template<> struct product_type_selector { enum { ret = GemvProduct }; }; template<> struct product_type_selector { enum { ret = CoeffBasedProductMode }; }; template<> struct product_type_selector { enum { ret = GemmProduct }; }; template<> struct product_type_selector { enum { ret = GemmProduct }; }; template<> struct product_type_selector { enum { ret = GemmProduct }; }; template<> struct product_type_selector { enum { ret = GemmProduct }; }; template<> struct product_type_selector { enum { ret = GemmProduct }; }; template<> struct product_type_selector { enum { ret = GemmProduct }; }; template<> struct product_type_selector { enum { ret = GemmProduct }; }; } // end namespace internal /** \class ProductReturnType * \ingroup Core_Module * * \brief Helper class to get the correct and optimized returned type of operator* * * \param Lhs the type of the left-hand side * \param Rhs the type of the right-hand side * \param ProductMode the type of the product (determined automatically by internal::product_mode) * * This class defines the typename Type representing the optimized product expression * between two matrix expressions. In practice, using ProductReturnType::Type * is the recommended way to define the result type of a function returning an expression * which involve a matrix product. The class Product should never be * used directly. * * \sa class Product, MatrixBase::operator*(const MatrixBase&) */ template struct ProductReturnType { // TODO use the nested type to reduce instanciations ???? 
// typedef typename internal::nested::type LhsNested; // typedef typename internal::nested::type RhsNested; typedef GeneralProduct Type; }; template struct ProductReturnType { typedef typename internal::nested::type >::type LhsNested; typedef typename internal::nested::type >::type RhsNested; typedef CoeffBasedProduct Type; }; template struct ProductReturnType { typedef typename internal::nested::type >::type LhsNested; typedef typename internal::nested::type >::type RhsNested; typedef CoeffBasedProduct Type; }; // this is a workaround for sun CC template struct LazyProductReturnType : public ProductReturnType {}; /*********************************************************************** * Implementation of Inner Vector Vector Product ***********************************************************************/ // FIXME : maybe the "inner product" could return a Scalar // instead of a 1x1 matrix ?? // Pro: more natural for the user // Cons: this could be a problem if in a meta unrolled algorithm a matrix-matrix // product ends up to a row-vector times col-vector product... To tackle this use // case, we could have a specialization for Block with: operator=(Scalar x); namespace internal { template struct traits > : traits::ReturnType,1,1> > {}; } template class GeneralProduct : internal::no_assignment_operator, public Matrix::ReturnType,1,1> { typedef Matrix::ReturnType,1,1> Base; public: GeneralProduct(const Lhs& lhs, const Rhs& rhs) { EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) Base::coeffRef(0,0) = (lhs.transpose().cwiseProduct(rhs)).sum(); } /** Convertion to scalar */ operator const typename Base::Scalar() const { return Base::coeff(0,0); } }; /*********************************************************************** * Implementation of Outer Vector Vector Product ***********************************************************************/ namespace internal { // Column major template EIGEN_DONT_INLINE void outer_product_selector_run(const ProductType& prod, Dest& dest, const Func& func, const false_type&) { typedef typename Dest::Index Index; // FIXME make sure lhs is sequentially stored // FIXME not very good if rhs is real and lhs complex while alpha is real too const Index cols = dest.cols(); for (Index j=0; j EIGEN_DONT_INLINE void outer_product_selector_run(const ProductType& prod, Dest& dest, const Func& func, const true_type&) { typedef typename Dest::Index Index; // FIXME make sure rhs is sequentially stored // FIXME not very good if lhs is real and rhs complex while alpha is real too const Index rows = dest.rows(); for (Index i=0; i struct traits > : traits, Lhs, Rhs> > {}; } template class GeneralProduct : public ProductBase, Lhs, Rhs> { template struct is_row_major : internal::conditional<(int(T::Flags)&RowMajorBit), internal::true_type, internal::false_type>::type {}; public: EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct) GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) { EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) } struct set { template void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() = src; } }; struct add { template void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() += src; } }; struct sub { template void operator()(const Dst& dst, const Src& src) const { 
dst.const_cast_derived() -= src; } }; struct adds { Scalar m_scale; adds(const Scalar& s) : m_scale(s) {} template void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() += m_scale * src; } }; template inline void evalTo(Dest& dest) const { internal::outer_product_selector_run(*this, dest, set(), is_row_major()); } template inline void addTo(Dest& dest) const { internal::outer_product_selector_run(*this, dest, add(), is_row_major()); } template inline void subTo(Dest& dest) const { internal::outer_product_selector_run(*this, dest, sub(), is_row_major()); } template void scaleAndAddTo(Dest& dest, const Scalar& alpha) const { internal::outer_product_selector_run(*this, dest, adds(alpha), is_row_major()); } }; /*********************************************************************** * Implementation of General Matrix Vector Product ***********************************************************************/ /* According to the shape/flags of the matrix we have to distinghish 3 different cases: * 1 - the matrix is col-major, BLAS compatible and M is large => call fast BLAS-like colmajor routine * 2 - the matrix is row-major, BLAS compatible and N is large => call fast BLAS-like rowmajor routine * 3 - all other cases are handled using a simple loop along the outer-storage direction. * Therefore we need a lower level meta selector. * Furthermore, if the matrix is the rhs, then the product has to be transposed. */ namespace internal { template struct traits > : traits, Lhs, Rhs> > {}; template struct gemv_selector; } // end namespace internal template class GeneralProduct : public ProductBase, Lhs, Rhs> { public: EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct) typedef typename Lhs::Scalar LhsScalar; typedef typename Rhs::Scalar RhsScalar; GeneralProduct(const Lhs& a_lhs, const Rhs& a_rhs) : Base(a_lhs,a_rhs) { // EIGEN_STATIC_ASSERT((internal::is_same::value), // YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) } enum { Side = Lhs::IsVectorAtCompileTime ? OnTheLeft : OnTheRight }; typedef typename internal::conditional::type MatrixType; template void scaleAndAddTo(Dest& dst, const Scalar& alpha) const { eigen_assert(m_lhs.rows() == dst.rows() && m_rhs.cols() == dst.cols()); internal::gemv_selector::HasUsableDirectAccess)>::run(*this, dst, alpha); } }; namespace internal { // The vector is on the left => transposition template struct gemv_selector { template static void run(const ProductType& prod, Dest& dest, const typename ProductType::Scalar& alpha) { Transpose destT(dest); enum { OtherStorageOrder = StorageOrder == RowMajor ? ColMajor : RowMajor }; gemv_selector ::run(GeneralProduct,Transpose, GemvProduct> (prod.rhs().transpose(), prod.lhs().transpose()), destT, alpha); } }; template struct gemv_static_vector_if; template struct gemv_static_vector_if { EIGEN_STRONG_INLINE Scalar* data() { eigen_internal_assert(false && "should never be called"); return 0; } }; template struct gemv_static_vector_if { EIGEN_STRONG_INLINE Scalar* data() { return 0; } }; template struct gemv_static_vector_if { #if EIGEN_ALIGN_STATICALLY internal::plain_array m_data; EIGEN_STRONG_INLINE Scalar* data() { return m_data.array; } #else // Some architectures cannot align on the stack, // => let's manually enforce alignment by allocating more data and return the address of the first aligned element. 
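// (Concretely, the data() accessor below rounds the buffer address down to the previous
//  16-byte boundary and then steps 16 bytes forward, i.e. (addr & ~15) + 16, which always
//  yields a 16-byte aligned pointer that still lies inside the over-allocated plain_array.)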
enum { ForceAlignment = internal::packet_traits::Vectorizable, PacketSize = internal::packet_traits::size }; internal::plain_array m_data; EIGEN_STRONG_INLINE Scalar* data() { return ForceAlignment ? reinterpret_cast((reinterpret_cast(m_data.array) & ~(size_t(15))) + 16) : m_data.array; } #endif }; template<> struct gemv_selector { template static inline void run(const ProductType& prod, Dest& dest, const typename ProductType::Scalar& alpha) { typedef typename ProductType::Index Index; typedef typename ProductType::LhsScalar LhsScalar; typedef typename ProductType::RhsScalar RhsScalar; typedef typename ProductType::Scalar ResScalar; typedef typename ProductType::RealScalar RealScalar; typedef typename ProductType::ActualLhsType ActualLhsType; typedef typename ProductType::ActualRhsType ActualRhsType; typedef typename ProductType::LhsBlasTraits LhsBlasTraits; typedef typename ProductType::RhsBlasTraits RhsBlasTraits; typedef Map, Aligned> MappedDest; ActualLhsType actualLhs = LhsBlasTraits::extract(prod.lhs()); ActualRhsType actualRhs = RhsBlasTraits::extract(prod.rhs()); ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs()) * RhsBlasTraits::extractScalarFactor(prod.rhs()); enum { // FIXME find a way to allow an inner stride on the result if packet_traits::size==1 // on, the other hand it is good for the cache to pack the vector anyways... EvalToDestAtCompileTime = Dest::InnerStrideAtCompileTime==1, ComplexByReal = (NumTraits::IsComplex) && (!NumTraits::IsComplex), MightCannotUseDest = (Dest::InnerStrideAtCompileTime!=1) || ComplexByReal }; gemv_static_vector_if static_dest; bool alphaIsCompatible = (!ComplexByReal) || (numext::imag(actualAlpha)==RealScalar(0)); bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible; RhsScalar compatibleAlpha = get_factor::run(actualAlpha); ei_declare_aligned_stack_constructed_variable(ResScalar,actualDestPtr,dest.size(), evalToDest ? 
dest.data() : static_dest.data()); if(!evalToDest) { #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN int size = dest.size(); EIGEN_DENSE_STORAGE_CTOR_PLUGIN #endif if(!alphaIsCompatible) { MappedDest(actualDestPtr, dest.size()).setZero(); compatibleAlpha = RhsScalar(1); } else MappedDest(actualDestPtr, dest.size()) = dest; } general_matrix_vector_product ::run( actualLhs.rows(), actualLhs.cols(), actualLhs.data(), actualLhs.outerStride(), actualRhs.data(), actualRhs.innerStride(), actualDestPtr, 1, compatibleAlpha); if (!evalToDest) { if(!alphaIsCompatible) dest += actualAlpha * MappedDest(actualDestPtr, dest.size()); else dest = MappedDest(actualDestPtr, dest.size()); } } }; template<> struct gemv_selector { template static void run(const ProductType& prod, Dest& dest, const typename ProductType::Scalar& alpha) { typedef typename ProductType::LhsScalar LhsScalar; typedef typename ProductType::RhsScalar RhsScalar; typedef typename ProductType::Scalar ResScalar; typedef typename ProductType::Index Index; typedef typename ProductType::ActualLhsType ActualLhsType; typedef typename ProductType::ActualRhsType ActualRhsType; typedef typename ProductType::_ActualRhsType _ActualRhsType; typedef typename ProductType::LhsBlasTraits LhsBlasTraits; typedef typename ProductType::RhsBlasTraits RhsBlasTraits; typename add_const::type actualLhs = LhsBlasTraits::extract(prod.lhs()); typename add_const::type actualRhs = RhsBlasTraits::extract(prod.rhs()); ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs()) * RhsBlasTraits::extractScalarFactor(prod.rhs()); enum { // FIXME find a way to allow an inner stride on the result if packet_traits::size==1 // on, the other hand it is good for the cache to pack the vector anyways... DirectlyUseRhs = _ActualRhsType::InnerStrideAtCompileTime==1 }; gemv_static_vector_if static_rhs; ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhsPtr,actualRhs.size(), DirectlyUseRhs ? const_cast(actualRhs.data()) : static_rhs.data()); if(!DirectlyUseRhs) { #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN int size = actualRhs.size(); EIGEN_DENSE_STORAGE_CTOR_PLUGIN #endif Map(actualRhsPtr, actualRhs.size()) = actualRhs; } general_matrix_vector_product ::run( actualLhs.rows(), actualLhs.cols(), actualLhs.data(), actualLhs.outerStride(), actualRhsPtr, 1, dest.data(), dest.innerStride(), actualAlpha); } }; template<> struct gemv_selector { template static void run(const ProductType& prod, Dest& dest, const typename ProductType::Scalar& alpha) { typedef typename Dest::Index Index; // TODO makes sure dest is sequentially stored in memory, otherwise use a temp const Index size = prod.rhs().rows(); for(Index k=0; k struct gemv_selector { template static void run(const ProductType& prod, Dest& dest, const typename ProductType::Scalar& alpha) { typedef typename Dest::Index Index; // TODO makes sure rhs is sequentially stored in memory, otherwise use a temp const Index rows = prod.rows(); for(Index i=0; i template inline const typename ProductReturnType::Type MatrixBase::operator*(const MatrixBase &other) const { // A note regarding the function declaration: In MSVC, this function will sometimes // not be inlined since DenseStorage is an unwindable object for dynamic // matrices and product types are holding a member to store the result. // Thus it does not help tagging this function with EIGEN_STRONG_INLINE. 
enum { ProductIsValid = Derived::ColsAtCompileTime==Dynamic || OtherDerived::RowsAtCompileTime==Dynamic || int(Derived::ColsAtCompileTime)==int(OtherDerived::RowsAtCompileTime), AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime, SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived,OtherDerived) }; // note to the lost user: // * for a dot product use: v1.dot(v2) // * for a coeff-wise product use: v1.cwiseProduct(v2) EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes), INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS) EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors), INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION) EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT) #ifdef EIGEN_DEBUG_PRODUCT internal::product_type::debug(); #endif return typename ProductReturnType::Type(derived(), other.derived()); } /** \returns an expression of the matrix product of \c *this and \a other without implicit evaluation. * * The returned product will behave like any other expressions: the coefficients of the product will be * computed once at a time as requested. This might be useful in some extremely rare cases when only * a small and no coherent fraction of the result's coefficients have to be computed. * * \warning This version of the matrix product can be much much slower. So use it only if you know * what you are doing and that you measured a true speed improvement. * * \sa operator*(const MatrixBase&) */ template template const typename LazyProductReturnType::Type MatrixBase::lazyProduct(const MatrixBase &other) const { enum { ProductIsValid = Derived::ColsAtCompileTime==Dynamic || OtherDerived::RowsAtCompileTime==Dynamic || int(Derived::ColsAtCompileTime)==int(OtherDerived::RowsAtCompileTime), AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime, SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived,OtherDerived) }; // note to the lost user: // * for a dot product use: v1.dot(v2) // * for a coeff-wise product use: v1.cwiseProduct(v2) EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes), INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS) EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors), INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION) EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT) return typename LazyProductReturnType::Type(derived(), other.derived()); } } // end namespace Eigen #endif // EIGEN_PRODUCT_H RcppEigen/inst/include/Eigen/src/Core/DiagonalMatrix.h0000644000176200001440000002647512253717461022336 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud // Copyright (C) 2007-2009 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_DIAGONALMATRIX_H #define EIGEN_DIAGONALMATRIX_H namespace Eigen { #ifndef EIGEN_PARSED_BY_DOXYGEN template class DiagonalBase : public EigenBase { public: typedef typename internal::traits::DiagonalVectorType DiagonalVectorType; typedef typename DiagonalVectorType::Scalar Scalar; typedef typename DiagonalVectorType::RealScalar RealScalar; typedef typename internal::traits::StorageKind StorageKind; typedef typename internal::traits::Index Index; enum { RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime, ColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime, MaxRowsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime, MaxColsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime, IsVectorAtCompileTime = 0, Flags = 0 }; typedef Matrix DenseMatrixType; typedef DenseMatrixType DenseType; typedef DiagonalMatrix PlainObject; inline const Derived& derived() const { return *static_cast(this); } inline Derived& derived() { return *static_cast(this); } DenseMatrixType toDenseMatrix() const { return derived(); } template void evalTo(MatrixBase &other) const; template void addTo(MatrixBase &other) const { other.diagonal() += diagonal(); } template void subTo(MatrixBase &other) const { other.diagonal() -= diagonal(); } inline const DiagonalVectorType& diagonal() const { return derived().diagonal(); } inline DiagonalVectorType& diagonal() { return derived().diagonal(); } inline Index rows() const { return diagonal().size(); } inline Index cols() const { return diagonal().size(); } /** \returns the diagonal matrix product of \c *this by the matrix \a matrix. */ template const DiagonalProduct operator*(const MatrixBase &matrix) const { return DiagonalProduct(matrix.derived(), derived()); } inline const DiagonalWrapper, const DiagonalVectorType> > inverse() const { return diagonal().cwiseInverse(); } inline const DiagonalWrapper, const DiagonalVectorType> > operator*(const Scalar& scalar) const { return diagonal() * scalar; } friend inline const DiagonalWrapper, const DiagonalVectorType> > operator*(const Scalar& scalar, const DiagonalBase& other) { return other.diagonal() * scalar; } #ifdef EIGEN2_SUPPORT template bool isApprox(const DiagonalBase& other, typename NumTraits::Real precision = NumTraits::dummy_precision()) const { return diagonal().isApprox(other.diagonal(), precision); } template bool isApprox(const MatrixBase& other, typename NumTraits::Real precision = NumTraits::dummy_precision()) const { return toDenseMatrix().isApprox(other, precision); } #endif }; template template void DiagonalBase::evalTo(MatrixBase &other) const { other.setZero(); other.diagonal() = diagonal(); } #endif /** \class DiagonalMatrix * \ingroup Core_Module * * \brief Represents a diagonal matrix with its storage * * \param _Scalar the type of coefficients * \param SizeAtCompileTime the dimension of the matrix, or Dynamic * \param MaxSizeAtCompileTime the dimension of the matrix, or Dynamic. This parameter is optional and defaults * to SizeAtCompileTime. Most of the time, you do not need to specify it. 
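 *
 * A short usage sketch (illustrative only):
 * \code
 * Eigen::DiagonalMatrix<double, 3> d(1.0, 2.0, 3.0); // stores only the diagonal coefficients
 * Eigen::Matrix3d m = Eigen::Matrix3d::Random();
 * Eigen::Matrix3d scaled = d * m;                    // scales the rows of m without forming a dense diagonal matrix
 * \endcode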
* * \sa class DiagonalWrapper */ namespace internal { template struct traits > : traits > { typedef Matrix<_Scalar,SizeAtCompileTime,1,0,MaxSizeAtCompileTime,1> DiagonalVectorType; typedef Dense StorageKind; typedef DenseIndex Index; enum { Flags = LvalueBit }; }; } template class DiagonalMatrix : public DiagonalBase > { public: #ifndef EIGEN_PARSED_BY_DOXYGEN typedef typename internal::traits::DiagonalVectorType DiagonalVectorType; typedef const DiagonalMatrix& Nested; typedef _Scalar Scalar; typedef typename internal::traits::StorageKind StorageKind; typedef typename internal::traits::Index Index; #endif protected: DiagonalVectorType m_diagonal; public: /** const version of diagonal(). */ inline const DiagonalVectorType& diagonal() const { return m_diagonal; } /** \returns a reference to the stored vector of diagonal coefficients. */ inline DiagonalVectorType& diagonal() { return m_diagonal; } /** Default constructor without initialization */ inline DiagonalMatrix() {} /** Constructs a diagonal matrix with given dimension */ inline DiagonalMatrix(Index dim) : m_diagonal(dim) {} /** 2D constructor. */ inline DiagonalMatrix(const Scalar& x, const Scalar& y) : m_diagonal(x,y) {} /** 3D constructor. */ inline DiagonalMatrix(const Scalar& x, const Scalar& y, const Scalar& z) : m_diagonal(x,y,z) {} /** Copy constructor. */ template inline DiagonalMatrix(const DiagonalBase& other) : m_diagonal(other.diagonal()) {} #ifndef EIGEN_PARSED_BY_DOXYGEN /** copy constructor. prevent a default copy constructor from hiding the other templated constructor */ inline DiagonalMatrix(const DiagonalMatrix& other) : m_diagonal(other.diagonal()) {} #endif /** generic constructor from expression of the diagonal coefficients */ template explicit inline DiagonalMatrix(const MatrixBase& other) : m_diagonal(other) {} /** Copy operator. */ template DiagonalMatrix& operator=(const DiagonalBase& other) { m_diagonal = other.diagonal(); return *this; } #ifndef EIGEN_PARSED_BY_DOXYGEN /** This is a special case of the templated operator=. Its purpose is to * prevent a default operator= from hiding the templated operator=. */ DiagonalMatrix& operator=(const DiagonalMatrix& other) { m_diagonal = other.diagonal(); return *this; } #endif /** Resizes to given size. */ inline void resize(Index size) { m_diagonal.resize(size); } /** Sets all coefficients to zero. */ inline void setZero() { m_diagonal.setZero(); } /** Resizes and sets all coefficients to zero. */ inline void setZero(Index size) { m_diagonal.setZero(size); } /** Sets this matrix to be the identity matrix of the current size. */ inline void setIdentity() { m_diagonal.setOnes(); } /** Sets this matrix to be the identity matrix of the given size. */ inline void setIdentity(Index size) { m_diagonal.setOnes(size); } }; /** \class DiagonalWrapper * \ingroup Core_Module * * \brief Expression of a diagonal matrix * * \param _DiagonalVectorType the type of the vector of diagonal coefficients * * This class is an expression of a diagonal matrix, but not storing its own vector of diagonal coefficients, * instead wrapping an existing vector expression. It is the return type of MatrixBase::asDiagonal() * and most of the time this is the only way that it is used. 
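 *
 * A short usage sketch (illustrative only):
 * \code
 * Eigen::Vector3d w(1.0, 2.0, 3.0);
 * Eigen::Matrix3d m = Eigen::Matrix3d::Ones();
 * Eigen::Matrix3d columnsScaled = m * w.asDiagonal(); // scales the columns of m; no dense diagonal matrix is materialized
 * \endcode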
* * \sa class DiagonalMatrix, class DiagonalBase, MatrixBase::asDiagonal() */ namespace internal { template struct traits > { typedef _DiagonalVectorType DiagonalVectorType; typedef typename DiagonalVectorType::Scalar Scalar; typedef typename DiagonalVectorType::Index Index; typedef typename DiagonalVectorType::StorageKind StorageKind; enum { RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime, ColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime, MaxRowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime, MaxColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime, Flags = traits::Flags & LvalueBit }; }; } template class DiagonalWrapper : public DiagonalBase >, internal::no_assignment_operator { public: #ifndef EIGEN_PARSED_BY_DOXYGEN typedef _DiagonalVectorType DiagonalVectorType; typedef DiagonalWrapper Nested; #endif /** Constructor from expression of diagonal coefficients to wrap. */ inline DiagonalWrapper(DiagonalVectorType& a_diagonal) : m_diagonal(a_diagonal) {} /** \returns a const reference to the wrapped expression of diagonal coefficients. */ const DiagonalVectorType& diagonal() const { return m_diagonal; } protected: typename DiagonalVectorType::Nested m_diagonal; }; /** \returns a pseudo-expression of a diagonal matrix with *this as vector of diagonal coefficients * * \only_for_vectors * * Example: \include MatrixBase_asDiagonal.cpp * Output: \verbinclude MatrixBase_asDiagonal.out * * \sa class DiagonalWrapper, class DiagonalMatrix, diagonal(), isDiagonal() **/ template inline const DiagonalWrapper MatrixBase::asDiagonal() const { return derived(); } /** \returns true if *this is approximately equal to a diagonal matrix, * within the precision given by \a prec. * * Example: \include MatrixBase_isDiagonal.cpp * Output: \verbinclude MatrixBase_isDiagonal.out * * \sa asDiagonal() */ template bool MatrixBase::isDiagonal(const RealScalar& prec) const { using std::abs; if(cols() != rows()) return false; RealScalar maxAbsOnDiagonal = static_cast(-1); for(Index j = 0; j < cols(); ++j) { RealScalar absOnDiagonal = abs(coeff(j,j)); if(absOnDiagonal > maxAbsOnDiagonal) maxAbsOnDiagonal = absOnDiagonal; } for(Index j = 0; j < cols(); ++j) for(Index i = 0; i < j; ++i) { if(!internal::isMuchSmallerThan(coeff(i, j), maxAbsOnDiagonal, prec)) return false; if(!internal::isMuchSmallerThan(coeff(j, i), maxAbsOnDiagonal, prec)) return false; } return true; } } // end namespace Eigen #endif // EIGEN_DIAGONALMATRIX_H RcppEigen/inst/include/Eigen/src/Core/DiagonalProduct.h0000644000176200001440000001354412647570720022505 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // Copyright (C) 2007-2009 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_DIAGONALPRODUCT_H #define EIGEN_DIAGONALPRODUCT_H namespace Eigen { namespace internal { template struct traits > : traits { typedef typename scalar_product_traits::ReturnType Scalar; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, _StorageOrder = MatrixType::Flags & RowMajorBit ? 
RowMajor : ColMajor, _ScalarAccessOnDiag = !((int(_StorageOrder) == ColMajor && int(ProductOrder) == OnTheLeft) ||(int(_StorageOrder) == RowMajor && int(ProductOrder) == OnTheRight)), _SameTypes = is_same::value, // FIXME currently we need same types, but in the future the next rule should be the one //_Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && ((!_PacketOnDiag) || (_SameTypes && bool(int(DiagonalType::DiagonalVectorType::Flags)&PacketAccessBit))), _Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && _SameTypes && (_ScalarAccessOnDiag || (bool(int(DiagonalType::DiagonalVectorType::Flags)&PacketAccessBit))), _LinearAccessMask = (RowsAtCompileTime==1 || ColsAtCompileTime==1) ? LinearAccessBit : 0, Flags = ((HereditaryBits|_LinearAccessMask|AlignedBit) & (unsigned int)(MatrixType::Flags)) | (_Vectorizable ? PacketAccessBit : 0),//(int(MatrixType::Flags)&int(DiagonalType::DiagonalVectorType::Flags)&AlignedBit), Cost0 = EIGEN_ADD_COST(NumTraits::MulCost, MatrixType::CoeffReadCost), CoeffReadCost = EIGEN_ADD_COST(Cost0,DiagonalType::DiagonalVectorType::CoeffReadCost) }; }; } template class DiagonalProduct : internal::no_assignment_operator, public MatrixBase > { public: typedef MatrixBase Base; EIGEN_DENSE_PUBLIC_INTERFACE(DiagonalProduct) inline DiagonalProduct(const MatrixType& matrix, const DiagonalType& diagonal) : m_matrix(matrix), m_diagonal(diagonal) { eigen_assert(diagonal.diagonal().size() == (ProductOrder == OnTheLeft ? matrix.rows() : matrix.cols())); } EIGEN_STRONG_INLINE Index rows() const { return m_matrix.rows(); } EIGEN_STRONG_INLINE Index cols() const { return m_matrix.cols(); } EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const { return m_diagonal.diagonal().coeff(ProductOrder == OnTheLeft ? row : col) * m_matrix.coeff(row, col); } EIGEN_STRONG_INLINE const Scalar coeff(Index idx) const { enum { StorageOrder = int(MatrixType::Flags) & RowMajorBit ? RowMajor : ColMajor }; return coeff(int(StorageOrder)==ColMajor?idx:0,int(StorageOrder)==ColMajor?0:idx); } template EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const { enum { StorageOrder = Flags & RowMajorBit ? RowMajor : ColMajor }; const Index indexInDiagonalVector = ProductOrder == OnTheLeft ? row : col; return packet_impl(row,col,indexInDiagonalVector,typename internal::conditional< ((int(StorageOrder) == RowMajor && int(ProductOrder) == OnTheLeft) ||(int(StorageOrder) == ColMajor && int(ProductOrder) == OnTheRight)), internal::true_type, internal::false_type>::type()); } template EIGEN_STRONG_INLINE PacketScalar packet(Index idx) const { enum { StorageOrder = int(MatrixType::Flags) & RowMajorBit ? RowMajor : ColMajor }; return packet(int(StorageOrder)==ColMajor?idx:0,int(StorageOrder)==ColMajor?0:idx); } protected: template EIGEN_STRONG_INLINE PacketScalar packet_impl(Index row, Index col, Index id, internal::true_type) const { return internal::pmul(m_matrix.template packet(row, col), internal::pset1(m_diagonal.diagonal().coeff(id))); } template EIGEN_STRONG_INLINE PacketScalar packet_impl(Index row, Index col, Index id, internal::false_type) const { enum { InnerSize = (MatrixType::Flags & RowMajorBit) ? MatrixType::ColsAtCompileTime : MatrixType::RowsAtCompileTime, DiagonalVectorPacketLoadMode = (LoadMode == Aligned && (((InnerSize%16) == 0) || (int(DiagonalType::DiagonalVectorType::Flags)&AlignedBit)==AlignedBit) ? 
Aligned : Unaligned) }; return internal::pmul(m_matrix.template packet(row, col), m_diagonal.diagonal().template packet(id)); } typename MatrixType::Nested m_matrix; typename DiagonalType::Nested m_diagonal; }; /** \returns the diagonal matrix product of \c *this by the diagonal matrix \a diagonal. */ template template inline const DiagonalProduct MatrixBase::operator*(const DiagonalBase &a_diagonal) const { return DiagonalProduct(derived(), a_diagonal.derived()); } } // end namespace Eigen #endif // EIGEN_DIAGONALPRODUCT_H RcppEigen/inst/include/Eigen/src/Core/Matrix.h0000644000176200001440000004171512647570720020673 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2010 Benoit Jacob // Copyright (C) 2008-2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MATRIX_H #define EIGEN_MATRIX_H namespace Eigen { /** \class Matrix * \ingroup Core_Module * * \brief The matrix class, also used for vectors and row-vectors * * The %Matrix class is the work-horse for all \em dense (\ref dense "note") matrices and vectors within Eigen. * Vectors are matrices with one column, and row-vectors are matrices with one row. * * The %Matrix class encompasses \em both fixed-size and dynamic-size objects (\ref fixedsize "note"). * * The first three template parameters are required: * \tparam _Scalar \anchor matrix_tparam_scalar Numeric type, e.g. float, double, int or std::complex. * User defined sclar types are supported as well (see \ref user_defined_scalars "here"). * \tparam _Rows Number of rows, or \b Dynamic * \tparam _Cols Number of columns, or \b Dynamic * * The remaining template parameters are optional -- in most cases you don't have to worry about them. * \tparam _Options \anchor matrix_tparam_options A combination of either \b #RowMajor or \b #ColMajor, and of either * \b #AutoAlign or \b #DontAlign. * The former controls \ref TopicStorageOrders "storage order", and defaults to column-major. The latter controls alignment, which is required * for vectorization. It defaults to aligning matrices except for fixed sizes that aren't a multiple of the packet size. * \tparam _MaxRows Maximum number of rows. Defaults to \a _Rows (\ref maxrows "note"). * \tparam _MaxCols Maximum number of columns. Defaults to \a _Cols (\ref maxrows "note"). * * Eigen provides a number of typedefs covering the usual cases. Here are some examples: * * \li \c Matrix2d is a 2x2 square matrix of doubles (\c Matrix) * \li \c Vector4f is a vector of 4 floats (\c Matrix) * \li \c RowVector3i is a row-vector of 3 ints (\c Matrix) * * \li \c MatrixXf is a dynamic-size matrix of floats (\c Matrix) * \li \c VectorXf is a dynamic-size vector of floats (\c Matrix) * * \li \c Matrix2Xf is a partially fixed-size (dynamic-size) matrix of floats (\c Matrix) * \li \c MatrixX3d is a partially dynamic-size (fixed-size) matrix of double (\c Matrix) * * See \link matrixtypedefs this page \endlink for a complete list of predefined \em %Matrix and \em Vector typedefs. 
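 *
 * A few of these typedefs in use (editorial sketch, not part of the upstream documentation):
 * \code
 * Eigen::Vector4f    vf;         // Matrix<float, 4, 1>
 * Eigen::RowVector3i ri;         // Matrix<int, 1, 3>
 * Eigen::MatrixXd    md(3, 5);   // Matrix<double, Dynamic, Dynamic> with 3 rows and 5 columns
 * \endcode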
 *
 * You can access elements of vectors and matrices using normal subscripting:
 *
 * \code
 * Eigen::VectorXd v(10);
 * v[0] = 0.1;
 * v[1] = 0.2;
 * v(0) = 0.3;
 * v(1) = 0.4;
 *
 * Eigen::MatrixXi m(10, 10);
 * m(0, 1) = 1;
 * m(0, 2) = 2;
 * m(0, 3) = 3;
 * \endcode
 *
 * This class can be extended with the help of the plugin mechanism described on the page
 * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_MATRIX_PLUGIN.
 *
 * Some notes:
 *
 * <dl>
 *
 * <dt>\anchor dense Dense versus sparse:</dt>
 * <dd>This %Matrix class handles dense, not sparse matrices and vectors. For sparse matrices and vectors, see the Sparse module.
 *
 * Dense matrices and vectors are plain usual arrays of coefficients. All the coefficients are stored in an ordinary contiguous array.
 * This is unlike Sparse matrices and vectors where the coefficients are stored as a list of nonzero coefficients.</dd>
 *
 * <dt>\anchor fixedsize Fixed-size versus dynamic-size:</dt>
 * <dd>Fixed-size means that the numbers of rows and columns are known at compile-time. In this case, Eigen allocates the array
 * of coefficients as a fixed-size array, as a class member. This makes sense for very small matrices, typically up to 4x4, sometimes up
 * to 16x16. Larger matrices should be declared as dynamic-size even if one happens to know their size at compile-time.
 *
 * Dynamic-size means that the numbers of rows or columns are not necessarily known at compile-time. In this case they are runtime
 * variables, and the array of coefficients is allocated dynamically on the heap; a short sketch illustrating the difference follows this list.
 *
 * Note that \em dense matrices, be they fixed-size or dynamic-size, do not expand dynamically in the sense of a std::map.
 * If you want this behavior, see the Sparse module.</dd>
 *
 * <dt>\anchor maxrows _MaxRows and _MaxCols:</dt>
 * <dd>In most cases, one just leaves these parameters to the default values.
 * These parameters mean the maximum size of rows and columns that the matrix may have. They are useful in cases
 * when the exact numbers of rows and columns are not known at compile-time, but it is known at compile-time that they cannot
 * exceed a certain value. This happens when taking dynamic-size blocks inside fixed-size matrices: in this case _MaxRows and _MaxCols
 * are the dimensions of the original matrix, while _Rows and _Cols are Dynamic.</dd>
 *
 * </dl>
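 *
 * A minimal sketch of the fixed-size / dynamic-size distinction (editorial addition, not part of the
 * upstream Eigen documentation; assumes <Eigen/Core> has been included and the names are illustrative):
 * \code
 * Eigen::Matrix3f a;           // fixed 3x3: coefficients live inside the object, no heap allocation
 * Eigen::MatrixXf b(10, 10);   // dynamic-size: coefficients are allocated on the heap
 * b.resize(5, 20);             // only dynamic-size objects can change their dimensions like this
 * \endcode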
* * \see MatrixBase for the majority of the API methods for matrices, \ref TopicClassHierarchy, * \ref TopicStorageOrders */ namespace internal { template struct traits > { typedef _Scalar Scalar; typedef Dense StorageKind; typedef DenseIndex Index; typedef MatrixXpr XprKind; enum { RowsAtCompileTime = _Rows, ColsAtCompileTime = _Cols, MaxRowsAtCompileTime = _MaxRows, MaxColsAtCompileTime = _MaxCols, Flags = compute_matrix_flags<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::ret, CoeffReadCost = NumTraits::ReadCost, Options = _Options, InnerStrideAtCompileTime = 1, OuterStrideAtCompileTime = (Options&RowMajor) ? ColsAtCompileTime : RowsAtCompileTime }; }; } template class Matrix : public PlainObjectBase > { public: /** \brief Base class typedef. * \sa PlainObjectBase */ typedef PlainObjectBase Base; enum { Options = _Options }; EIGEN_DENSE_PUBLIC_INTERFACE(Matrix) typedef typename Base::PlainObject PlainObject; using Base::base; using Base::coeffRef; /** * \brief Assigns matrices to each other. * * \note This is a special case of the templated operator=. Its purpose is * to prevent a default operator= from hiding the templated operator=. * * \callgraph */ EIGEN_STRONG_INLINE Matrix& operator=(const Matrix& other) { return Base::_set(other); } /** \internal * \brief Copies the value of the expression \a other into \c *this with automatic resizing. * * *this might be resized to match the dimensions of \a other. If *this was a null matrix (not already initialized), * it will be initialized. * * Note that copying a row-vector into a vector (and conversely) is allowed. * The resizing, if any, is then done in the appropriate way so that row-vectors * remain row-vectors and vectors remain vectors. */ template EIGEN_STRONG_INLINE Matrix& operator=(const MatrixBase& other) { return Base::_set(other); } /* Here, doxygen failed to copy the brief information when using \copydoc */ /** * \brief Copies the generic expression \a other into *this. * \copydetails DenseBase::operator=(const EigenBase &other) */ template EIGEN_STRONG_INLINE Matrix& operator=(const EigenBase &other) { return Base::operator=(other); } template EIGEN_STRONG_INLINE Matrix& operator=(const ReturnByValue& func) { return Base::operator=(func); } /** \brief Default constructor. * * For fixed-size matrices, does nothing. * * For dynamic-size matrices, creates an empty matrix of size 0. Does not allocate any array. Such a matrix * is called a null matrix. This constructor is the unique way to create null matrices: resizing * a matrix to 0 is not supported. * * \sa resize(Index,Index) */ EIGEN_STRONG_INLINE Matrix() : Base() { Base::_check_template_params(); EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED } // FIXME is it still needed Matrix(internal::constructor_without_unaligned_array_assert) : Base(internal::constructor_without_unaligned_array_assert()) { Base::_check_template_params(); EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED } #ifdef EIGEN_HAVE_RVALUE_REFERENCES Matrix(Matrix&& other) : Base(std::move(other)) { Base::_check_template_params(); if (RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic) Base::_set_noalias(other); } Matrix& operator=(Matrix&& other) { other.swap(*this); return *this; } #endif /** \brief Constructs a vector or row-vector with given dimension. \only_for_vectors * * Note that this is only useful for dynamic-size vectors. For fixed-size vectors, * it is redundant to pass the dimension here, so it makes more sense to use the default * constructor Matrix() instead. 
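 *
 * Editorial sketch of the distinction (not upstream documentation; names are illustrative):
 * \code
 * Eigen::VectorXd v(25);   // dynamic-size vector with 25 (uninitialized) coefficients
 * Eigen::Vector3d w;       // fixed-size vector: the dimension is already encoded in the type
 * \endcode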
*/ EIGEN_STRONG_INLINE explicit Matrix(Index dim) : Base(dim, RowsAtCompileTime == 1 ? 1 : dim, ColsAtCompileTime == 1 ? 1 : dim) { Base::_check_template_params(); EIGEN_STATIC_ASSERT_VECTOR_ONLY(Matrix) eigen_assert(dim >= 0); eigen_assert(SizeAtCompileTime == Dynamic || SizeAtCompileTime == dim); EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED } #ifndef EIGEN_PARSED_BY_DOXYGEN template EIGEN_STRONG_INLINE Matrix(const T0& x, const T1& y) { Base::_check_template_params(); Base::template _init2(x, y); } #else /** \brief Constructs an uninitialized matrix with \a rows rows and \a cols columns. * * This is useful for dynamic-size matrices. For fixed-size matrices, * it is redundant to pass these parameters, so one should use the default constructor * Matrix() instead. */ Matrix(Index rows, Index cols); /** \brief Constructs an initialized 2D vector with given coefficients */ Matrix(const Scalar& x, const Scalar& y); #endif /** \brief Constructs an initialized 3D vector with given coefficients */ EIGEN_STRONG_INLINE Matrix(const Scalar& x, const Scalar& y, const Scalar& z) { Base::_check_template_params(); EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Matrix, 3) m_storage.data()[0] = x; m_storage.data()[1] = y; m_storage.data()[2] = z; } /** \brief Constructs an initialized 4D vector with given coefficients */ EIGEN_STRONG_INLINE Matrix(const Scalar& x, const Scalar& y, const Scalar& z, const Scalar& w) { Base::_check_template_params(); EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Matrix, 4) m_storage.data()[0] = x; m_storage.data()[1] = y; m_storage.data()[2] = z; m_storage.data()[3] = w; } explicit Matrix(const Scalar *data); /** \brief Constructor copying the value of the expression \a other */ template EIGEN_STRONG_INLINE Matrix(const MatrixBase& other) : Base(other.rows() * other.cols(), other.rows(), other.cols()) { // This test resides here, to bring the error messages closer to the user. Normally, these checks // are performed deeply within the library, thus causing long and scary error traces. EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) Base::_check_template_params(); Base::_set_noalias(other); } /** \brief Copy constructor */ EIGEN_STRONG_INLINE Matrix(const Matrix& other) : Base(other.rows() * other.cols(), other.rows(), other.cols()) { Base::_check_template_params(); Base::_set_noalias(other); } /** \brief Copy constructor with in-place evaluation */ template EIGEN_STRONG_INLINE Matrix(const ReturnByValue& other) { Base::_check_template_params(); Base::resize(other.rows(), other.cols()); other.evalTo(*this); } /** \brief Copy constructor for generic expressions. * \sa MatrixBase::operator=(const EigenBase&) */ template EIGEN_STRONG_INLINE Matrix(const EigenBase &other) : Base(other.derived().rows() * other.derived().cols(), other.derived().rows(), other.derived().cols()) { Base::_check_template_params(); Base::_resize_to_match(other); // FIXME/CHECK: isn't *this = other.derived() more efficient. it allows to // go for pure _set() implementations, right? *this = other; } /** \internal * \brief Override MatrixBase::swap() since for dynamic-sized matrices * of same type it is enough to swap the data pointers. 
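 *
 * Editorial sketch of the effect for dynamic-size matrices (illustrative only):
 * \code
 * Eigen::MatrixXd a = Eigen::MatrixXd::Zero(2, 2);
 * Eigen::MatrixXd b = Eigen::MatrixXd::Ones(5, 5);
 * a.swap(b);   // constant time: only the data pointers (and sizes) are exchanged
 * \endcode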
*/ template void swap(MatrixBase const & other) { this->_swap(other.derived()); } inline Index innerStride() const { return 1; } inline Index outerStride() const { return this->innerSize(); } /////////// Geometry module /////////// template explicit Matrix(const RotationBase& r); template Matrix& operator=(const RotationBase& r); #ifdef EIGEN2_SUPPORT template explicit Matrix(const eigen2_RotationBase& r); template Matrix& operator=(const eigen2_RotationBase& r); #endif // allow to extend Matrix outside Eigen #ifdef EIGEN_MATRIX_PLUGIN #include EIGEN_MATRIX_PLUGIN #endif protected: template friend struct internal::conservative_resize_like_impl; using Base::m_storage; }; /** \defgroup matrixtypedefs Global matrix typedefs * * \ingroup Core_Module * * Eigen defines several typedef shortcuts for most common matrix and vector types. * * The general patterns are the following: * * \c MatrixSizeType where \c Size can be \c 2,\c 3,\c 4 for fixed size square matrices or \c X for dynamic size, * and where \c Type can be \c i for integer, \c f for float, \c d for double, \c cf for complex float, \c cd * for complex double. * * For example, \c Matrix3d is a fixed-size 3x3 matrix type of doubles, and \c MatrixXf is a dynamic-size matrix of floats. * * There are also \c VectorSizeType and \c RowVectorSizeType which are self-explanatory. For example, \c Vector4cf is * a fixed-size vector of 4 complex floats. * * \sa class Matrix */ #define EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix) \ /** \ingroup matrixtypedefs */ \ typedef Matrix Matrix##SizeSuffix##TypeSuffix; \ /** \ingroup matrixtypedefs */ \ typedef Matrix Vector##SizeSuffix##TypeSuffix; \ /** \ingroup matrixtypedefs */ \ typedef Matrix RowVector##SizeSuffix##TypeSuffix; #define EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, Size) \ /** \ingroup matrixtypedefs */ \ typedef Matrix Matrix##Size##X##TypeSuffix; \ /** \ingroup matrixtypedefs */ \ typedef Matrix Matrix##X##Size##TypeSuffix; #define EIGEN_MAKE_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \ EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 2, 2) \ EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 3, 3) \ EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 4, 4) \ EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Dynamic, X) \ EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, 2) \ EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, 3) \ EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, 4) EIGEN_MAKE_TYPEDEFS_ALL_SIZES(int, i) EIGEN_MAKE_TYPEDEFS_ALL_SIZES(float, f) EIGEN_MAKE_TYPEDEFS_ALL_SIZES(double, d) EIGEN_MAKE_TYPEDEFS_ALL_SIZES(std::complex, cf) EIGEN_MAKE_TYPEDEFS_ALL_SIZES(std::complex, cd) #undef EIGEN_MAKE_TYPEDEFS_ALL_SIZES #undef EIGEN_MAKE_TYPEDEFS #undef EIGEN_MAKE_FIXED_TYPEDEFS } // end namespace Eigen #endif // EIGEN_MATRIX_H RcppEigen/inst/include/Eigen/src/Core/GenericPacketMath.h0000644000176200001440000003005112253717461022732 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // Copyright (C) 2006-2008 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_GENERIC_PACKET_MATH_H #define EIGEN_GENERIC_PACKET_MATH_H namespace Eigen { namespace internal { /** \internal * \file GenericPacketMath.h * * Default implementation for types not supported by the vectorization. * In practice these functions are provided to make easier the writing * of generic vectorized code. 
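 *
 * Editorial sketch of the scalar fallbacks defined below (illustrative only; assumes <Eigen/Core>
 * is included so that the internal namespace is available):
 * \code
 * double a = 2.0, b = 3.0, c = 1.0;
 * double r = Eigen::internal::pmadd(a, b, c);   // generic fallback computes a*b + c, i.e. 7.0
 * double s = Eigen::internal::pmin(a, b);       // generic fallback is std::min, i.e. 2.0
 * \endcode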
*/ #ifndef EIGEN_DEBUG_ALIGNED_LOAD #define EIGEN_DEBUG_ALIGNED_LOAD #endif #ifndef EIGEN_DEBUG_UNALIGNED_LOAD #define EIGEN_DEBUG_UNALIGNED_LOAD #endif #ifndef EIGEN_DEBUG_ALIGNED_STORE #define EIGEN_DEBUG_ALIGNED_STORE #endif #ifndef EIGEN_DEBUG_UNALIGNED_STORE #define EIGEN_DEBUG_UNALIGNED_STORE #endif struct default_packet_traits { enum { HasAdd = 1, HasSub = 1, HasMul = 1, HasNegate = 1, HasAbs = 1, HasAbs2 = 1, HasMin = 1, HasMax = 1, HasConj = 1, HasSetLinear = 1, HasDiv = 0, HasSqrt = 0, HasExp = 0, HasLog = 0, HasPow = 0, HasSin = 0, HasCos = 0, HasTan = 0, HasASin = 0, HasACos = 0, HasATan = 0 }; }; template struct packet_traits : default_packet_traits { typedef T type; enum { Vectorizable = 0, size = 1, AlignedOnScalar = 0 }; enum { HasAdd = 0, HasSub = 0, HasMul = 0, HasNegate = 0, HasAbs = 0, HasAbs2 = 0, HasMin = 0, HasMax = 0, HasConj = 0, HasSetLinear = 0 }; }; /** \internal \returns a + b (coeff-wise) */ template inline Packet padd(const Packet& a, const Packet& b) { return a+b; } /** \internal \returns a - b (coeff-wise) */ template inline Packet psub(const Packet& a, const Packet& b) { return a-b; } /** \internal \returns -a (coeff-wise) */ template inline Packet pnegate(const Packet& a) { return -a; } /** \internal \returns conj(a) (coeff-wise) */ template inline Packet pconj(const Packet& a) { return numext::conj(a); } /** \internal \returns a * b (coeff-wise) */ template inline Packet pmul(const Packet& a, const Packet& b) { return a*b; } /** \internal \returns a / b (coeff-wise) */ template inline Packet pdiv(const Packet& a, const Packet& b) { return a/b; } /** \internal \returns the min of \a a and \a b (coeff-wise) */ template inline Packet pmin(const Packet& a, const Packet& b) { using std::min; return (min)(a, b); } /** \internal \returns the max of \a a and \a b (coeff-wise) */ template inline Packet pmax(const Packet& a, const Packet& b) { using std::max; return (max)(a, b); } /** \internal \returns the absolute value of \a a */ template inline Packet pabs(const Packet& a) { using std::abs; return abs(a); } /** \internal \returns the bitwise and of \a a and \a b */ template inline Packet pand(const Packet& a, const Packet& b) { return a & b; } /** \internal \returns the bitwise or of \a a and \a b */ template inline Packet por(const Packet& a, const Packet& b) { return a | b; } /** \internal \returns the bitwise xor of \a a and \a b */ template inline Packet pxor(const Packet& a, const Packet& b) { return a ^ b; } /** \internal \returns the bitwise andnot of \a a and \a b */ template inline Packet pandnot(const Packet& a, const Packet& b) { return a & (!b); } /** \internal \returns a packet version of \a *from, from must be 16 bytes aligned */ template inline Packet pload(const typename unpacket_traits::type* from) { return *from; } /** \internal \returns a packet version of \a *from, (un-aligned load) */ template inline Packet ploadu(const typename unpacket_traits::type* from) { return *from; } /** \internal \returns a packet with elements of \a *from duplicated. * For instance, for a packet of 8 elements, 4 scalar will be read from \a *from and * duplicated to form: {from[0],from[0],from[1],from[1],,from[2],from[2],,from[3],from[3]} * Currently, this function is only used for scalar * complex products. 
*/ template inline Packet ploaddup(const typename unpacket_traits::type* from) { return *from; } /** \internal \returns a packet with constant coefficients \a a, e.g.: (a,a,a,a) */ template inline Packet pset1(const typename unpacket_traits::type& a) { return a; } /** \internal \brief Returns a packet with coefficients (a,a+1,...,a+packet_size-1). */ template inline typename packet_traits::type plset(const Scalar& a) { return a; } /** \internal copy the packet \a from to \a *to, \a to must be 16 bytes aligned */ template inline void pstore(Scalar* to, const Packet& from) { (*to) = from; } /** \internal copy the packet \a from to \a *to, (un-aligned store) */ template inline void pstoreu(Scalar* to, const Packet& from) { (*to) = from; } /** \internal tries to do cache prefetching of \a addr */ template inline void prefetch(const Scalar* addr) { #if !defined(_MSC_VER) __builtin_prefetch(addr); #endif } /** \internal \returns the first element of a packet */ template inline typename unpacket_traits::type pfirst(const Packet& a) { return a; } /** \internal \returns a packet where the element i contains the sum of the packet of \a vec[i] */ template inline Packet preduxp(const Packet* vecs) { return vecs[0]; } /** \internal \returns the sum of the elements of \a a*/ template inline typename unpacket_traits::type predux(const Packet& a) { return a; } /** \internal \returns the product of the elements of \a a*/ template inline typename unpacket_traits::type predux_mul(const Packet& a) { return a; } /** \internal \returns the min of the elements of \a a*/ template inline typename unpacket_traits::type predux_min(const Packet& a) { return a; } /** \internal \returns the max of the elements of \a a*/ template inline typename unpacket_traits::type predux_max(const Packet& a) { return a; } /** \internal \returns the reversed elements of \a a*/ template inline Packet preverse(const Packet& a) { return a; } /** \internal \returns \a a with real and imaginary part flipped (for complex type only) */ template inline Packet pcplxflip(const Packet& a) { // FIXME: uncomment the following in case we drop the internal imag and real functions. 
// using std::imag; // using std::real; return Packet(imag(a),real(a)); } /************************** * Special math functions ***************************/ /** \internal \returns the sine of \a a (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet psin(const Packet& a) { using std::sin; return sin(a); } /** \internal \returns the cosine of \a a (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pcos(const Packet& a) { using std::cos; return cos(a); } /** \internal \returns the tan of \a a (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet ptan(const Packet& a) { using std::tan; return tan(a); } /** \internal \returns the arc sine of \a a (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pasin(const Packet& a) { using std::asin; return asin(a); } /** \internal \returns the arc cosine of \a a (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pacos(const Packet& a) { using std::acos; return acos(a); } /** \internal \returns the exp of \a a (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pexp(const Packet& a) { using std::exp; return exp(a); } /** \internal \returns the log of \a a (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet plog(const Packet& a) { using std::log; return log(a); } /** \internal \returns the square-root of \a a (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet psqrt(const Packet& a) { using std::sqrt; return sqrt(a); } /*************************************************************************** * The following functions might not have to be overwritten for vectorized types ***************************************************************************/ /** \internal copy a packet with constant coeficient \a a (e.g., [a,a,a,a]) to \a *to. \a to must be 16 bytes aligned */ // NOTE: this function must really be templated on the packet type (think about different packet types for the same scalar type) template inline void pstore1(typename unpacket_traits::type* to, const typename unpacket_traits::type& a) { pstore(to, pset1(a)); } /** \internal \returns a * b + c (coeff-wise) */ template inline Packet pmadd(const Packet& a, const Packet& b, const Packet& c) { return padd(pmul(a, b),c); } /** \internal \returns a packet version of \a *from. * If LoadMode equals #Aligned, \a from must be 16 bytes aligned */ template inline Packet ploadt(const typename unpacket_traits::type* from) { if(LoadMode == Aligned) return pload(from); else return ploadu(from); } /** \internal copy the packet \a from to \a *to. * If StoreMode equals #Aligned, \a to must be 16 bytes aligned */ template inline void pstoret(Scalar* to, const Packet& from) { if(LoadMode == Aligned) pstore(to, from); else pstoreu(to, from); } /** \internal default implementation of palign() allowing partial specialization */ template struct palign_impl { // by default data are aligned, so there is nothing to be done :) static inline void run(PacketType&, const PacketType&) {} }; /** \internal update \a first using the concatenation of the packet_size minus \a Offset last elements * of \a first and \a Offset first elements of \a second. * * This function is currently only used to optimize matrix-vector products on unligned matrices. * It takes 2 packets that represent a contiguous memory array, and returns a packet starting * at the position \a Offset. 
For instance, for packets of 4 elements, we have: * Input: * - first = {f0,f1,f2,f3} * - second = {s0,s1,s2,s3} * Output: * - if Offset==0 then {f0,f1,f2,f3} * - if Offset==1 then {f1,f2,f3,s0} * - if Offset==2 then {f2,f3,s0,s1} * - if Offset==3 then {f3,s0,s1,s3} */ template inline void palign(PacketType& first, const PacketType& second) { palign_impl::run(first,second); } /*************************************************************************** * Fast complex products (GCC generates a function call which is very slow) ***************************************************************************/ template<> inline std::complex pmul(const std::complex& a, const std::complex& b) { return std::complex(real(a)*real(b) - imag(a)*imag(b), imag(a)*real(b) + real(a)*imag(b)); } template<> inline std::complex pmul(const std::complex& a, const std::complex& b) { return std::complex(real(a)*real(b) - imag(a)*imag(b), imag(a)*real(b) + real(a)*imag(b)); } } // end namespace internal } // end namespace Eigen #endif // EIGEN_GENERIC_PACKET_MATH_H RcppEigen/inst/include/Eigen/src/Core/Select.h0000644000176200001440000001372112253717461020640 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SELECT_H #define EIGEN_SELECT_H namespace Eigen { /** \class Select * \ingroup Core_Module * * \brief Expression of a coefficient wise version of the C++ ternary operator ?: * * \param ConditionMatrixType the type of the \em condition expression which must be a boolean matrix * \param ThenMatrixType the type of the \em then expression * \param ElseMatrixType the type of the \em else expression * * This class represents an expression of a coefficient wise version of the C++ ternary operator ?:. * It is the return type of DenseBase::select() and most of the time this is the only way it is used. * * \sa DenseBase::select(const DenseBase&, const DenseBase&) const */ namespace internal { template struct traits > : traits { typedef typename traits::Scalar Scalar; typedef Dense StorageKind; typedef typename traits::XprKind XprKind; typedef typename ConditionMatrixType::Nested ConditionMatrixNested; typedef typename ThenMatrixType::Nested ThenMatrixNested; typedef typename ElseMatrixType::Nested ElseMatrixNested; enum { RowsAtCompileTime = ConditionMatrixType::RowsAtCompileTime, ColsAtCompileTime = ConditionMatrixType::ColsAtCompileTime, MaxRowsAtCompileTime = ConditionMatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = ConditionMatrixType::MaxColsAtCompileTime, Flags = (unsigned int)ThenMatrixType::Flags & ElseMatrixType::Flags & HereditaryBits, CoeffReadCost = traits::type>::CoeffReadCost + EIGEN_SIZE_MAX(traits::type>::CoeffReadCost, traits::type>::CoeffReadCost) }; }; } template class Select : internal::no_assignment_operator, public internal::dense_xpr_base< Select >::type { public: typedef typename internal::dense_xpr_base