Automatic Differentiation
 
Loading...
Searching...
No Matches
laplace_marginal_density_estimator.hpp
Go to the documentation of this file.
1#ifndef STAN_MATH_MIX_FUNCTOR_LAPLACE_MARGINAL_DENSITY_ESTIMATOR_HPP
2#define STAN_MATH_MIX_FUNCTOR_LAPLACE_MARGINAL_DENSITY_ESTIMATOR_HPP
16#include <unsupported/Eigen/MatrixFunctions>
17#include <cmath>
18#include <mutex>
19
27namespace stan {
28namespace math {
29
 /* Size of the blocks in block diagonal hessian*/
 /* Maximum number of steps*/
 /**
  * Construct the shared options for the Laplace marginal solvers.
  * NOTE(review): the struct opener and member declarations were lost in this
  * extraction; the members are inferred from this initializer list.
  *
  * @param hessian_block_size_ size of the blocks of the block-diagonal Hessian
  * @param solver_ which solver to use (1, 2, or 3; validated elsewhere)
  * @param tolerance_ convergence tolerance on the change in the objective
  * @param max_num_steps_ maximum number of Newton steps
  * @param allow_fallthrough_ whether a failed solver may fall through to the
  *   next one instead of throwing
  * @param max_steps_line_search_ maximum number of line-search steps;
  *   NOTE(review): `line_search` appears to be an options object constructed
  *   from this count (its declaration is not visible here) — confirm.
  */
 laplace_options_base(int hessian_block_size_, int solver_, double tolerance_,
                      int max_num_steps_, bool allow_fallthrough_,
                      int max_steps_line_search_)
     : hessian_block_size(hessian_block_size_),
       solver(solver_),
       tolerance(tolerance_),
       max_num_steps(max_num_steps_),
       allow_fallthrough(allow_fallthrough_),
       line_search(max_steps_line_search_) {}
};
76
77template <bool HasInitTheta>
79
template <>
struct laplace_options<false> : public laplace_options_base {
  // All options take their defaults (presumably from in-class member
  // initializers in laplace_options_base, not visible in this extraction).
  laplace_options() = default;

  // Keep all defaults but override the Hessian block size.
  explicit laplace_options(int hessian_block_size_) {
    hessian_block_size = hessian_block_size_;
  }
};
88
89template <>
 // NOTE(review): the `struct laplace_options<true> : public
 // laplace_options_base {` opener was lost in this extraction.
 /* Value for user supplied initial theta */
 Eigen::VectorXd theta_0{0};

 /**
  * Construct options with a user-supplied initial theta.
  *
  * @param theta_0_ initial guess for theta; `value_of` strips any autodiff
  *   type so only plain doubles are stored
  * @param tolerance_ convergence tolerance on the objective change
  * @param max_num_steps_ maximum number of Newton steps
  * @param hessian_block_size_ size of the block-diagonal Hessian blocks
  * @param solver_ which solver to use (1, 2, or 3)
  * @param max_steps_line_search_ maximum number of line-search steps
  * @param allow_fallthrough_ whether a failed solver may fall through
  */
 template <typename ThetaVec>
 laplace_options(ThetaVec&& theta_0_, double tolerance_, int max_num_steps_,
                 int hessian_block_size_, int solver_,
                 int max_steps_line_search_, bool allow_fallthrough_)
     : laplace_options_base(hessian_block_size_, solver_, tolerance_,
                            max_num_steps_, allow_fallthrough_,
                            max_steps_line_search_),
       theta_0(value_of(std::forward<ThetaVec>(theta_0_))) {}
};
103
106
107namespace internal {
108
/**
 * Convert a user-supplied tuple of control values into a laplace options
 * object, or forward a non-tuple argument unchanged.
 *
 * Each tuple element is type-checked with a `static_assert`; the
 * `sizeof(std::decay_t<Ops>*) == 0` idiom makes the assertion dependent so it
 * only fires when the offending branch is actually instantiated, producing a
 * targeted error message for each misplaced argument.
 */
template <typename Options>
inline constexpr auto tuple_to_laplace_options(Options&& ops) {
  using Ops = std::decay_t<Options>;
  if constexpr (is_tuple_v<Ops>) {
    if constexpr (!is_eigen_v<std::tuple_element_t<0, std::decay_t<Ops>>>) {
      static_assert(
          sizeof(std::decay_t<Ops>*) == 0,
          "ERROR:(laplace_marginal_lpdf) The first laplace argument is "
          "expected to be an Eigen vector of dynamic size representing the "
          "initial theta_0.");
    }
    if constexpr (!stan::is_inner_tuple_type_v<1, Ops, double>) {
      static_assert(
          sizeof(std::decay_t<Ops>*) == 0,
          "ERROR:(laplace_marginal_lpdf) The second laplace argument is "
          "expected to be a double representing the tolerance.");
    }
    if constexpr (!stan::is_inner_tuple_type_v<2, Ops, int>) {
      static_assert(
          sizeof(std::decay_t<Ops>*) == 0,
          "ERROR:(laplace_marginal_lpdf) The third laplace argument is "
          "expected to be an int representing the maximum number of steps for "
          "the laplace approximation.");
    }
    if constexpr (!stan::is_inner_tuple_type_v<3, Ops, int>) {
      static_assert(
          sizeof(std::decay_t<Ops>*) == 0,
          "ERROR:(laplace_marginal_lpdf) The fourth laplace argument is "
          "expected to be an int representing the solver.");
    }
    if constexpr (!stan::is_inner_tuple_type_v<4, Ops, int>) {
      static_assert(
          sizeof(std::decay_t<Ops>*) == 0,
          "ERROR:(laplace_marginal_lpdf) The fifth laplace argument is "
          "expected to be an int representing the max steps for the laplace "
          "approximaton's wolfe line search.");
    }
    // Element 5 (allow fallthrough) may be supplied as int (0/1) or bool.
    constexpr bool is_fallthrough
        // NOTE(review): the start of this initializer was lost in the
        // extraction; presumably `= stan::is_inner_tuple_type_v<`.
        5, Ops, int> || stan::is_inner_tuple_type_v<5, Ops, bool>;
    if constexpr (!is_fallthrough) {
      static_assert(
          sizeof(std::decay_t<Ops>*) == 0,
          "ERROR:(laplace_marginal_lpdf) The sixth laplace argument is "
          "expected to be an int representing allow fallthrough (0/1).");
    }
    // Fields not present in the tuple (hessian_block_size) use the defaults.
    auto defaults = laplace_options_default{};
    // NOTE(review): a line was lost in the extraction here; the argument
    // order matches the laplace_options<true> constructor, so this is
    // presumably `return laplace_options<true>{`.
        value_of(std::get<0>(std::forward<Ops>(ops))),
        std::get<1>(ops),
        std::get<2>(ops),
        defaults.hessian_block_size,
        std::get<3>(ops),
        std::get<4>(ops),
        (std::get<5>(ops) > 0) ? true : false,
    };
  } else {
    return std::forward<Ops>(ops);
  }
}
169
/**
 * Bundle of quantities produced by the Laplace-approximation Newton solvers.
 * Which members are meaningfully populated depends on the solver used:
 * build_result in the Cholesky solvers passes an empty LU, and only
 * CholeskyKSolver populates K_root (see the build_result implementations
 * below).
 */
template <typename ThetaVec, typename WR, typename L_t, typename A_vec,
          typename ThetaGrad, typename LU_t, typename KRoot>
// NOTE(review): the `struct laplace_density_estimates {` opener and the
// `int solver_used;` member declaration were lost in this extraction.
 /* log marginal density */
 double lmd{std::numeric_limits<double>::infinity()};
 /* ThetaVec at the mode */
 ThetaVec theta;
 // W-related matrix: square root of the Hessian for the W-Cholesky solvers,
 // or the full Hessian for the K-Cholesky/LU solvers (see build_result).
 WR W_r;
 // Cholesky factor of the solver's B matrix (empty for the LU solver).
 L_t L;
 // Latent vector with theta = covariance * a at the mode.
 A_vec a;
 // Gradient of the log likelihood w.r.t. theta at the mode.
 ThetaGrad theta_grad;
 /* LU matrix from solver 3 */
 LU_t LU;
 // Cholesky factor of the covariance (only populated by CholeskyKSolver).
 KRoot K_root;
 // Take ownership of all solver byproducts.
 laplace_density_estimates(double lmd_, ThetaVec&& theta_, WR&& W_r_, L_t&& L_,
                           A_vec&& a_, ThetaGrad&& theta_grad_, LU_t&& LU_,
                           KRoot&& K_root_, int solver_used_)
     : lmd(lmd_),
       theta(std::move(theta_)),
       W_r(std::move(W_r_)),
       L(std::move(L_)),
       a(std::move(a_)),
       theta_grad(std::move(theta_grad_)),
       LU(std::move(LU_)),
       K_root(std::move(K_root_)),
       solver_used(solver_used_) {}
};
222
230template <typename WRootMat>
231inline void block_matrix_sqrt(WRootMat& W_root,
232 const Eigen::SparseMatrix<double>& W,
233 const Eigen::Index block_size) {
234 int n_block = W.cols() / block_size;
235 Eigen::MatrixXd local_block(block_size, block_size);
236 Eigen::MatrixXd local_block_sqrt(block_size, block_size);
237 Eigen::MatrixXd sqrt_t_mat = Eigen::MatrixXd::Zero(block_size, block_size);
238 // No block operation available for sparse matrices, so we have to loop
239 // See https://eigen.tuxfamily.org/dox/group__TutorialSparse.html#title7
240 for (int i = 0; i < n_block; i++) {
241 sqrt_t_mat.setZero();
242 local_block
243 = W.block(i * block_size, i * block_size, block_size, block_size);
244 if (!local_block.array().isFinite().any()) {
245 throw std::domain_error(
246 std::string("Error in block_matrix_sqrt: "
247 "NaNs detected in block diagonal starting at (")
248 + std::to_string(i) + ", " + std::to_string(i) + ")");
249 }
250 // Issue here, sqrt is done over T of the complex schur
251 Eigen::RealSchur<Eigen::MatrixXd> schurOfA(local_block);
252 // Compute Schur decomposition of arg
253 const auto& t_mat = schurOfA.matrixT();
254 const auto& u_mat = schurOfA.matrixU();
255 // Check if diagonal of schur is not positive
256 if ((t_mat.diagonal().array() < 0).any()) {
257 throw std::domain_error(
258 std::string("Error in block_matrix_sqrt: "
259 "values less than 0 detected in block diagonal's schur "
260 "decomposition starting at (")
261 + std::to_string(i) + ", " + std::to_string(i) + ")");
262 }
263 try {
264 // Compute square root of T
265 Eigen::matrix_sqrt_quasi_triangular(t_mat, sqrt_t_mat);
266 // Compute square root of arg
267 local_block_sqrt = u_mat * sqrt_t_mat * u_mat.adjoint();
268 } catch (const std::exception& e) {
269 throw std::domain_error(
270 "Error in block_matrix_sqrt: "
271 "The matrix is not positive definite");
272 }
273 for (int k = 0; k < block_size; k++) {
274 for (int j = 0; j < block_size; j++) {
275 W_root.coeffRef(i * block_size + j, i * block_size + k)
276 = local_block_sqrt(j, k);
277 }
278 }
279 }
280}
281
291template <bool InitTheta, typename CovarMat>
292inline void validate_laplace_options(const char* frame_name,
293 const laplace_options<InitTheta>& options,
294 const CovarMat& covariance) {
295 if constexpr (InitTheta) {
296 check_nonzero_size(frame_name, "initial guess", options.theta_0);
297 check_finite(frame_name, "initial guess", options.theta_0);
298 if (unlikely(options.theta_0.size() != covariance.rows())) {
299 std::stringstream msg;
300 msg << frame_name << ": The size of the initial theta ("
301 << options.theta_0.size()
302 << ") vector must match the rows and columns of the covariance "
303 "matrix ("
304 << covariance.rows() << ", " << covariance.cols() << ").";
305 throw std::domain_error(msg.str());
306 }
307 }
308 check_nonnegative(frame_name, "tolerance", options.tolerance);
309 check_positive(frame_name, "max_num_steps", options.max_num_steps);
310 check_positive(frame_name, "hessian_block_size", options.hessian_block_size);
311 check_square(frame_name, "covariance", covariance);
312
313 const Eigen::Index theta_size = covariance.rows();
314 if (unlikely(theta_size % options.hessian_block_size != 0
315 || theta_size < options.hessian_block_size)) {
316 throw std::domain_error(
317 "laplace_marginal_density: Hessian block size mismatch.");
318 }
319
320 if (unlikely(options.solver < 1 || options.solver > 3)) {
321 throw std::domain_error(
322 "laplace_marginal_density: solver must be 1, 2, or 3. Got: "
323 + std::to_string(options.solver));
324 }
325}
326
341
344
347
 // NOTE(review): the `NewtonState` struct opener and the declarations of
 // wolfe_info, wolfe_status, and proposal were lost in this extraction.
 // Right-hand side of the Newton solve (filled by the solver policies, e.g.
 // b = W * theta + theta_grad).
 Eigen::VectorXd b;

 // Solver system matrix (e.g. B = I + W_r * Sigma * W_r), solver-dependent.
 Eigen::MatrixXd B;

 // Objective gradient (in a-space) at the previous iterate.
 Eigen::VectorXd prev_g;
 // Set when the loop runs one last iteration with exact Wolfe conditions.
 bool final_loop = false;

 /**
  * Initialize the Newton state.
  * NOTE(review): the initial a is computed as covariance^{-1} * theta_init
  * (via LLT solve), presumably so the invariant theta = covariance * a holds
  * from the start — wolfe_info's constructor is not visible here to confirm
  * the parameter order.
  */
 template <typename ObjFun, typename ThetaGradFun, typename CovarianceT,
           typename ThetaInitializer>
 NewtonState(int theta_size, ObjFun&& obj_fun, ThetaGradFun&& theta_grad_f,
             CovarianceT&& covariance, ThetaInitializer&& theta_init)
     : wolfe_info(std::forward<ObjFun>(obj_fun),
                  covariance.llt().solve(theta_init),
                  std::forward<ThetaInitializer>(theta_init),
                  std::forward<ThetaGradFun>(theta_grad_f)),
       proposal(theta_size),
       b(theta_size),
       B(theta_size, theta_size),
       prev_g(theta_size) {
   wolfe_status.num_backtracks_ = -1;  // Safe initial value for BB step
 }

 // Accessors for the current iterate held inside wolfe_info.
 auto& curr() & { return wolfe_info.curr_; }

 const auto& curr() const& { return wolfe_info.curr_; }
 auto&& curr() && { return std::move(wolfe_info).curr(); }
 // Accessors for the previous iterate held inside wolfe_info.
 auto& prev() & { return wolfe_info.prev_; }

 const auto& prev() const& { return wolfe_info.prev_; }
 auto&& prev() && { return std::move(wolfe_info).prev(); }
 // Accessors for the raw Newton proposal.
 auto& proposal_step() & { return proposal; }
 const auto& proposal_step() const& { return proposal; }
 auto&& proposal_step() && { return std::move(proposal); }
 /**
  * Promote the current iterate to previous (by swap) and clamp the step size
  * into [0, max_alpha] for the next iteration.
  */
 template <typename Options>
 inline void update_next_step(const Options& options) {
   this->prev().swap(this->curr());
   this->curr().alpha()
       = std::clamp(this->curr().alpha(), 0.0, options.line_search.max_alpha);
 }
};
431
441template <typename LLT, typename B_t>
442inline void llt_with_jitter(LLT& llt_B, B_t& B, double min_jitter = 1e-10,
443 double max_jitter = 1e-5) {
444 llt_B.compute(B);
445 if (llt_B.info() != Eigen::Success) {
446 double prev_jitter = 0.0;
447 double jitter_try = min_jitter;
448 for (; jitter_try < max_jitter; jitter_try *= 10) {
449 // Remove previously added jitter before adding the new (larger) amount,
450 // so that the total diagonal perturbation is exactly jitter_try.
451 B.diagonal().array() += (jitter_try - prev_jitter);
452 prev_jitter = jitter_try;
453 llt_B.compute(B);
454 if (llt_B.info() == Eigen::Success) {
455 break;
456 }
457 }
458 if (llt_B.info() != Eigen::Success) {
459 throw std::domain_error(
460 "laplace_marginal_density: Cholesky failed after adding jitter up to "
461 + std::to_string(jitter_try));
462 }
463 }
464}
465
 // NOTE(review): the `struct CholeskyWSolverDiag {` opener was lost in this
 // extraction. Solver 1 specialization for hessian_block_size == 1: the
 // Hessian W is diagonal, so its square root is an elementwise sqrt.
 // Elementwise square root of the diagonal Hessian.
 Eigen::VectorXd W_r_diag;

 // Diagonal of the Hessian of the negative log likelihood.
 Eigen::VectorXd W_diag;

 // Cholesky factorization of B = I + W_r * Sigma * W_r.
 Eigen::LLT<Eigen::MatrixXd> llt_B;

 // Pre-size W_r_diag; covariance is unused here but kept for a uniform
 // solver-policy constructor interface.
 template <typename NewtonStateT, typename CovarMat>
 CholeskyWSolverDiag(const NewtonStateT& state, const CovarMat& covariance)
     : W_r_diag(Eigen::VectorXd::Zero(state.b.size())), W_diag(0), llt_B() {}
 /**
  * Perform one Newton solve: compute the diagonal Hessian, factorize
  * B = I + W_r * Sigma * W_r, and write the raw proposal into the state.
  * @throw std::domain_error if any Hessian diagonal entry is negative or
  *   non-finite
  */
 template <typename NewtonStateT, typename LLFun, typename LLTupleArgs,
           typename CovarMat>
 void solve_step(NewtonStateT& state, const LLFun& ll_fun,
                 const LLTupleArgs& ll_args, const CovarMat& covariance,
                 int /*hessian_block_size*/, std::ostream* msgs) {
   const Eigen::Index theta_size = state.b.size();

   // 1. Compute diagonal Hessian
   W_diag = laplace_likelihood::diagonal_hessian(ll_fun, state.prev().theta(),
                                                 ll_args, msgs);
   for (Eigen::Index j = 0; j < W_diag.size(); j++) {
     if (W_diag.coeff(j) < 0 || !std::isfinite(W_diag.coeff(j))) {
       throw std::domain_error(
           "laplace_marginal_density: Hessian matrix is not positive "
           "definite");
     } else {
       W_r_diag.coeffRef(j) = std::sqrt(W_diag.coeff(j));
     }
   }

   // 2. Formulate B = I + W_r * Sigma * W_r
   state.B.noalias()
       = Eigen::MatrixXd::Identity(theta_size, theta_size)
         + W_r_diag.asDiagonal() * covariance * W_r_diag.asDiagonal();

   // 3. Factorize B with jittering fallback
   llt_with_jitter(llt_B, state.B);
   // 4. Solve for the raw Newton proposal in a-space.
   state.b.noalias() = (W_diag.array() * state.prev().theta().array()).matrix()
                       + state.prev().theta_grad();
   auto L = llt_B.matrixL();
   auto LT = llt_B.matrixU();
   // a_new = b - W_r * B^{-1} * W_r * Sigma * b, with B^{-1} applied via the
   // two triangular solves L then L^T.
   state.proposal_step().a().noalias()
       = state.b
         - W_r_diag.asDiagonal()
               * LT.solve(
                   L.solve(W_r_diag.cwiseProduct(covariance * state.b)));
 }

 /**
  * log|B| = 2 * sum(log(diag(L))) from the Cholesky factor of B.
  */
 double compute_log_determinant() const {
   return 2.0 * llt_B.matrixLLT().diagonal().array().log().sum();
 }

 /**
  * Assemble the final density estimates (solver tag 1); W_r is returned as a
  * sparse diagonal matrix, and the LU and K_root slots are left empty.
  */
 template <typename NewtonStateT>
 auto build_result(NewtonStateT& state, double log_det) {
   // NOTE(review): the `return laplace_density_estimates{` line was lost in
   // this extraction.
       state.prev().obj() - 0.5 * log_det,
       std::move(state).prev().theta(),
       Eigen::SparseMatrix<double>(W_r_diag.asDiagonal()),
       Eigen::MatrixXd(llt_B.matrixL()),
       std::move(state).prev().a(),
       std::move(state).prev().theta_grad(),
       Eigen::PartialPivLU<Eigen::MatrixXd>{},
       Eigen::MatrixXd(0, 0),
       1};
 }
};
581
 // NOTE(review): the `struct CholeskyWSolverBlock {` opener was lost in this
 // extraction. Solver 1 for hessian_block_size > 1: the block-diagonal
 // Hessian's square root is computed per block via block_matrix_sqrt.
 // Block-diagonal square root of the Hessian.
 Eigen::SparseMatrix<double> W_r;

 // Block-diagonal Hessian of the log likelihood.
 Eigen::SparseMatrix<double> W_block;

 // Cholesky factorization of B = I + W_r * Sigma * W_r.
 Eigen::LLT<Eigen::MatrixXd> llt_B;

 /**
  * Pre-build the sparsity pattern of W_r: one dense block per diagonal
  * block, filled with placeholder 1.0 entries, then compressed so later
  * writes via coeffRef do not allocate.
  */
 template <typename NewtonStateT>
 CholeskyWSolverBlock(const NewtonStateT& state, int hessian_block_size)
     : W_r(state.b.size(), state.b.size()) {
   const Eigen::Index theta_size = state.b.size();
   W_r.reserve(Eigen::VectorXi::Constant(theta_size, hessian_block_size));
   const Eigen::Index n_block = theta_size / hessian_block_size;
   for (Eigen::Index ii = 0; ii < n_block; ii++) {
     for (Eigen::Index k = 0; k < hessian_block_size; k++) {
       for (Eigen::Index j = 0; j < hessian_block_size; j++) {
         W_r.insert(ii * hessian_block_size + j, ii * hessian_block_size + k)
             = 1.0;
       }
     }
   }
   W_r.makeCompressed();
 }

 /**
  * Perform one Newton solve: compute the block Hessian and its square root,
  * factorize B = I + W_r * Sigma * W_r, and write the raw proposal into the
  * state.
  * @throw std::domain_error if a Hessian diagonal entry is negative or
  *   non-finite
  */
 template <typename NewtonStateT, typename LLFun, typename LLTupleArgs,
           typename CovarMat>
 void solve_step(NewtonStateT& state, const LLFun& ll_fun,
                 const LLTupleArgs& ll_args, const CovarMat& covariance,
                 int hessian_block_size, std::ostream* msgs) {
   const Eigen::Index theta_size = state.b.size();
   // 1. Compute block Hessian
   // NOTE(review): the start of this assignment was lost in the extraction;
   // presumably `W_block = laplace_likelihood::block_hessian(`.
       ll_fun, state.prev().theta(), hessian_block_size, ll_args, msgs);

   for (Eigen::Index j = 0; j < W_block.rows(); j++) {
     if (W_block.coeff(j, j) < 0 || !std::isfinite(W_block.coeff(j, j))) {
       throw std::domain_error(
           "laplace_marginal_density: Hessian matrix is not positive "
           "definite");
     }
   }

   // 2. Compute W_r = sqrt(W)
   block_matrix_sqrt(W_r, W_block, hessian_block_size);

   // 3. Formulate B = I + W_r * Sigma * W_r
   state.B.noalias() = Eigen::MatrixXd::Identity(theta_size, theta_size)
                       + W_r * (covariance * W_r);

   // 4. Factorize B with jittering fallback
   llt_with_jitter(llt_B, state.B);

   // 5. Solve for the raw Newton proposal in a-space.
   state.b.noalias()
       = W_block * state.prev().theta() + state.prev().theta_grad();
   auto L = llt_B.matrixL();
   auto LT = llt_B.matrixU();
   state.proposal_step().a().noalias()
       = state.b - W_r * LT.solve(L.solve(W_r * (covariance * state.b)));
 }

 /**
  * log|B| = 2 * sum(log(diag(L))) from the Cholesky factor of B.
  */
 double compute_log_determinant() const {
   return 2.0 * llt_B.matrixLLT().diagonal().array().log().sum();
 }

 /**
  * Assemble the final density estimates (solver tag 1); the LU and K_root
  * slots are left empty.
  */
 template <typename NewtonStateT>
 auto build_result(NewtonStateT& state, double log_det) {
   return laplace_density_estimates{state.prev().obj() - 0.5 * log_det,
                                    std::move(state).prev().theta(),
                                    std::move(W_r),
                                    Eigen::MatrixXd(llt_B.matrixL()),
                                    std::move(state).prev().a(),
                                    std::move(state).prev().theta_grad(),
                                    Eigen::PartialPivLU<Eigen::MatrixXd>{},
                                    Eigen::MatrixXd(0, 0),
                                    1};
 }
};
710
 // NOTE(review): the `struct CholeskyKSolver {` opener was lost in this
 // extraction. Solver 2: works through the Cholesky factor of the
 // covariance, B = I + K^T * W * K.
 // Lower Cholesky factor of the covariance: Sigma = K_root * K_root^T.
 Eigen::MatrixXd K_root;

 // Full (sparse) Hessian of the log likelihood.
 Eigen::SparseMatrix<double> W_full;

 // Cholesky factorization of B = I + K^T * W * K.
 Eigen::LLT<Eigen::MatrixXd> llt_B;

 /**
  * Factorize the covariance once up front.
  * @throw std::domain_error if the covariance is not positive definite
  */
 template <typename NewtonStateT, typename CovarMat>
 CholeskyKSolver(const NewtonStateT& state, const CovarMat& covariance)
     : K_root(0, 0), W_full(0, 0), llt_B() {
   auto K_root_llt = covariance.template selfadjointView<Eigen::Lower>().llt();
   if (K_root_llt.info() != Eigen::Success) {
     throw std::domain_error(
         "laplace_marginal_density: Cholesky of covariance failed at start");
   }
   // NOTE(review): std::move on the matrixL() view expression has no effect;
   // the assignment materializes the triangular view into K_root either way.
   K_root = std::move(K_root_llt.matrixL());
 }

 /**
  * Perform one Newton solve: compute the block Hessian, factorize
  * B = I + K^T * W * K, and write the raw proposal into the state.
  */
 template <typename NewtonStateT, typename LLFun, typename LLTupleArgs,
           typename CovarMat>
 void solve_step(NewtonStateT& state, const LLFun& ll_fun,
                 const LLTupleArgs& ll_args, const CovarMat& covariance,
                 int hessian_block_size, std::ostream* msgs) {
   const Eigen::Index theta_size = state.b.size();

   // 1. Compute Hessian
   // NOTE(review): the start of this assignment was lost in the extraction;
   // presumably `W_full = laplace_likelihood::block_hessian(`.
       ll_fun, state.prev().theta(), hessian_block_size, ll_args, msgs);

   // 2. Formulate B = I + K^T * W * K
   state.B.noalias() = Eigen::MatrixXd::Identity(theta_size, theta_size)
                       + K_root.transpose() * (W_full * K_root);

   // 3. Factorize B with jittering fallback
   llt_with_jitter(llt_B, state.B);

   // 4. Solve for the raw Newton proposal in a-space.
   state.b.noalias()
       = W_full * state.prev().theta() + state.prev().theta_grad();
   auto L = llt_B.matrixL();
   auto LT = llt_B.matrixU();
   // a_new = K^{-T} * B^{-1} * K^T * b, with B^{-1} applied via the two
   // triangular solves.
   state.proposal_step().a().noalias()
       = K_root.transpose().template triangularView<Eigen::Upper>().solve(
           LT.solve(L.solve(K_root.transpose() * state.b)));
 }

 /**
  * log|B| = 2 * sum(log(diag(L))) from the Cholesky factor of B.
  */
 double compute_log_determinant() const {
   return 2.0 * llt_B.matrixLLT().diagonal().array().log().sum();
 }

 /**
  * Assemble the final density estimates (solver tag 2); the full Hessian is
  * returned in the W_r slot and K_root is populated, while LU stays empty.
  */
 template <typename NewtonStateT>
 auto build_result(NewtonStateT& state, double log_det) {
   return laplace_density_estimates{state.prev().obj() - 0.5 * log_det,
                                    std::move(state.prev().theta()),
                                    std::move(W_full),
                                    Eigen::MatrixXd(llt_B.matrixL()),
                                    std::move(state.prev().a()),
                                    std::move(state.prev().theta_grad()),
                                    Eigen::PartialPivLU<Eigen::MatrixXd>{},
                                    std::move(K_root),
                                    2};
 }
};
821
/**
 * Solver 3: general LU factorization of B = I + Sigma * W.
 * Used as the final fallback; unlike solvers 1 and 2 it does not require a
 * symmetric positive-definite B.
 */
struct LUSolver {
  // Partial-pivot LU factorization of B = I + Sigma * W.
  Eigen::PartialPivLU<Eigen::MatrixXd> lu;

  // Full (sparse) Hessian of the log likelihood.
  Eigen::SparseMatrix<double> W_full;

  /**
   * Perform one Newton solve: compute the block Hessian, LU-factorize
   * B = I + Sigma * W, and write the raw proposal into the state.
   */
  template <typename NewtonStateT, typename LLFun, typename LLTupleArgs,
            typename CovarMat>
  void solve_step(NewtonStateT& state, const LLFun& ll_fun,
                  const LLTupleArgs& ll_args, const CovarMat& covariance,
                  int hessian_block_size, std::ostream* msgs) {
    const Eigen::Index theta_size = state.b.size();

    // 1. Compute Hessian
    // NOTE(review): the start of this assignment was lost in the extraction;
    // presumably `W_full = laplace_likelihood::block_hessian(`.
        ll_fun, state.prev().theta(), hessian_block_size, ll_args, msgs);

    // 2. Factorize B = I + Sigma * W
    lu.compute(Eigen::MatrixXd::Identity(theta_size, theta_size)
               + covariance * W_full);

    // 3. Solve for the raw Newton proposal in a-space.
    state.b.noalias()
        = W_full * state.prev().theta() + state.prev().theta_grad();
    state.proposal_step().a().noalias()
        = state.b - W_full * lu.solve(covariance * state.b);
  }

  /**
   * log|B| as the sum of logs of the U factor's diagonal.
   * NOTE(review): this assumes every pivot is positive — a negative pivot
   * would yield NaN. Confirm B's pivots are positive by construction here.
   */
  double compute_log_determinant() const {
    return lu.matrixLU().diagonal().array().log().sum();
  }

  /**
   * Assemble the final density estimates (solver tag 3); the Cholesky L and
   * K_root slots are left empty and the LU factorization is moved out.
   */
  template <typename NewtonStateT>
  auto build_result(NewtonStateT& state, double log_det) {
    return laplace_density_estimates{state.prev().obj() - 0.5 * log_det,
                                     std::move(state).prev().theta(),
                                     std::move(W_full),
                                     Eigen::MatrixXd(0, 0),
                                     std::move(state).prev().a(),
                                     std::move(state).prev().theta_grad(),
                                     std::move(lu),
                                     Eigen::MatrixXd(0, 0),
                                     3};
  }
};
915
/**
 * Run the Newton loop with a solver policy, updating the shared state.
 *
 * Each iteration calls the policy's solve_step to produce a raw Newton
 * proposal, then (unless this is the final exact-Wolfe pass) runs a line
 * search: first the cached full step (alpha = 1) is tried, then a
 * Barzilai-Borwein-seeded Wolfe search, with an Armijo-only fallback for the
 * cached step. The loop ends when the objective change drops below the
 * tolerance or the search fails, after one extra exact-Wolfe iteration.
 *
 * @return the policy's build_result with the log determinant folded in
 */
template <typename SolverPolicy, typename NewtonStateT, typename OptionsT,
          typename LLFunT, typename LLTupleArgsT, typename CovarMatT,
          typename UpdateFun>
inline auto run_newton_loop(SolverPolicy& solver, NewtonStateT& state,
                            const OptionsT& options, Eigen::Index& step_iter,
                            const LLFunT& ll_fun, const LLTupleArgsT& ll_args,
                            const CovarMatT& covariance, UpdateFun&& update_fun,
                            std::ostream* msgs) {
  bool finish_update = false;
  for (; step_iter <= options.max_num_steps; step_iter++) {
    solver.solve_step(state, ll_fun, ll_args, covariance,
                      options.hessian_block_size, msgs);
    if (!state.final_loop) {
      auto&& proposal = state.proposal_step();
      // Search direction in a-space.
      state.wolfe_info.p_ = proposal.a() - state.prev().a();
      // Objective gradient at prev in a-space: -Sigma*a + Sigma*theta_grad.
      state.prev_g.noalias() = -covariance * state.prev().a()
                               + covariance * state.prev().theta_grad();
      state.wolfe_info.init_dir_ = state.prev_g.dot(state.wolfe_info.p_);
      // Flip direction if not ascending
      state.wolfe_info.flip_direction();
      auto&& scratch = state.wolfe_info.scratch_;
      // Try the full Newton step (alpha = 1) first.
      proposal.eval_.alpha() = 1.0;
      const bool proposal_valid
          = update_fun(proposal, state.curr(), state.prev(), proposal.eval_,
                       state.wolfe_info.p_);
      const bool cached_proposal_ok
          = proposal_valid && std::isfinite(proposal.obj())
            && std::isfinite(proposal.dir())
            && proposal.alpha() > options.line_search.min_alpha;
      if (!cached_proposal_ok) {
        state.wolfe_status
        // NOTE(review): the right-hand side of this assignment was lost in
        // the extraction (presumably a failure-status WolfeStatus aggregate).
      } else if (options.line_search.max_iterations == 0) {
        // Line search disabled: accept the full step outright.
        state.curr().update(proposal);
        state.wolfe_status = WolfeStatus{WolfeReturn::Continue, 1, 0, true};
      } else {
        Eigen::VectorXd s = proposal.a() - state.prev().a();
        auto full_step_grad
            = (-covariance * proposal.a() + covariance * proposal.theta_grad())
                  .eval();
        // Seed the Wolfe search with a Barzilai-Borwein step length.
        state.curr().alpha() = barzilai_borwein_step_size(
            s, full_step_grad, state.prev_g, state.prev().alpha(),
            state.wolfe_status.num_backtracks_, options.line_search.min_alpha,
            options.line_search.max_alpha);
        state.wolfe_status = internal::wolfe_line_search(
            state.wolfe_info, update_fun, options.line_search, msgs);
      }
      bool search_failed = !state.wolfe_status.accept_;
      // If the Wolfe search rejected everything, the cached full step may
      // still be accepted under the weaker Armijo condition.
      const bool proposal_armijo_ok
          = cached_proposal_ok
      // NOTE(review): a conjunct was lost in the extraction; presumably
      // `&& internal::check_armijo(`.
                proposal.obj(), state.prev().obj(), proposal.alpha(),
                state.wolfe_info.init_dir_, options.line_search);
      if (search_failed && proposal_armijo_ok) {
        state.curr().update(proposal);
        state.wolfe_status
            = WolfeStatus{WolfeReturn::Armijo, state.wolfe_status.num_evals_,
                          state.wolfe_status.num_backtracks_, true};
        search_failed = false;
      }
      bool objective_converged
          = state.wolfe_status.accept_
            && std::abs(state.curr().obj() - state.prev().obj())
                   < options.tolerance;
      finish_update = objective_converged || search_failed;
    }
    if (finish_update) {
      if (!state.final_loop && state.wolfe_status.accept_) {
        // Do one final loop with exact wolfe conditions
        state.final_loop = true;
        state.update_next_step(options);
        continue;
      }
      return solver.build_result(state, solver.compute_log_determinant());
    } else {
      state.update_next_step(options);
    }
  }
  // Iteration budget exhausted: warn and return the best state found.
  if (msgs) {
    (*msgs)
        << std::string(
               "WARNING(laplace_marginal_density): max number of iterations: ")
               + std::to_string(options.max_num_steps) + " exceeded.";
  }
  return solver.build_result(state, solver.compute_log_determinant());
}
1024
1035inline void log_solver_fallback(const bool allow_fallthrough,
1036 std::ostream* msgs, std::string_view context,
1037 Eigen::Index iter,
1038 std::string_view failed_solver,
1039 std::string_view next_solver,
1040 const std::exception& e) {
1041 // Build once so we don't interleave with other logs.
1042 std::ostringstream os;
1043 std::string msg_type = allow_fallthrough ? "WARNING" : "ERROR";
1044 os << "[" << context << "] " << msg_type << ": solver fallback\n"
1045 << " " << std::left << std::setw(12) << "iteration:" << iter << "\n"
1046 << " " << std::left << std::setw(12) << "failed:" << failed_solver << "\n"
1047 << " " << std::left << std::setw(12) << "reason:" << e.what() << "\n"
1048 << " " << std::left << std::setw(12) << "action:"
1049 << "trying " << next_solver << "\n"
1050 << "note: this warning message will only be displayed once."
1051 << "\n";
1052 if (allow_fallthrough && msgs) {
1053 (*msgs) << os.str();
1054 } else {
1055 throw std::domain_error(std::string("[") + std::string(context) + "]");
1056 }
1057}
1058
1059template <bool InitTheta, typename Opts>
1060inline decltype(auto) theta_init_impl(Eigen::Index theta_size, Opts&& options) {
1061 if constexpr (InitTheta) {
1062 // If requested, use the prior mean as the initial value
1063 return std::decay_t<decltype(options)>(options).theta_0;
1064 } else {
1065 return Eigen::MatrixXd::Zero(theta_size, 1);
1066 }
1067}
1068
/**
 * Create the update function for the line search.
 *
 * The returned closure evaluates a trial step a = a_prev + alpha * p, derives
 * theta = Sigma * a, its gradient, the objective, and the directional
 * derivative along p, and retries with a backed-off alpha (via
 * retry_evaluate) when the evaluation throws or yields non-finite values.
 *
 * NOTE(review): `covariance`, `obj_fun`, and `theta_grad_f` are captured by
 * reference, so the returned closure must not outlive them; the caller in
 * this file keeps all three alive for the duration of the Newton loop.
 */
template <typename ObjFun, typename ThetaGradFun, typename Covariance,
          typename Options>
inline auto create_update_fun(ObjFun&& obj_fun, ThetaGradFun&& theta_grad_f,
                              Covariance&& covariance, Options&& options) {
  auto update_step = [&covariance, &obj_fun, &theta_grad_f](
                         auto& proposal, auto&& /* curr */, auto&& prev,
                         auto& eval_in, auto&& p) {
    try {
      proposal.a() = prev.a() + eval_in.alpha() * p;
      // Maintain the invariant theta = Sigma * a.
      proposal.theta().noalias() = covariance * proposal.a();
      proposal.theta_grad() = theta_grad_f(proposal.theta());
      eval_in.obj() = obj_fun(proposal.a(), proposal.theta());
      // Directional derivative of the objective along p (in a-space).
      eval_in.dir()
          = (-covariance * proposal.a() + covariance * proposal.theta_grad())
                .dot(p);
      return std::isfinite(eval_in.obj()) && std::isfinite(eval_in.dir());
    } catch (const std::exception&) {
      // A throwing likelihood evaluation is treated as an invalid step.
      return false;
    }
  };
  // Geometric step-size backoff; gives up once alpha falls below min_alpha.
  auto backoff = [&options](auto& eval) {
    eval.alpha() *= options.line_search.tau;
    return eval.alpha() > options.line_search.min_alpha;
  };
  return
      [update_step_ = std::move(update_step), backoff_ = std::move(backoff)](
          auto& proposal, auto&& curr, auto&& prev, auto& eval_in, auto&& p) {
        return internal::retry_evaluate(update_step_, proposal, curr, prev,
                                        eval_in, p, backoff_);
      };
}
1118
/**
 * For a latent Gaussian model, locate the mode of theta via Newton
 * iterations and return the Laplace estimate of the log marginal density
 * together with the solver byproducts needed for downstream gradients.
 *
 * The requested solver is tried first; on failure (and when
 * options.allow_fallthrough is set) each subsequent solver is tried in the
 * order 1 -> 2 -> 3, with a one-time fallback warning per solver.
 */
template <typename LLFun, typename LLTupleArgs, typename CovarMat,
          bool InitTheta,
// NOTE(review): the trailing template parameter(s) and the
// `inline auto laplace_marginal_density_est(` signature line were lost in
// this extraction.
    LLFun&& ll_fun, LLTupleArgs&& ll_args, CovarMat&& covariance,
    const laplace_options<InitTheta>& options, std::ostream* msgs) {
  internal::validate_laplace_options("laplace_marginal_density", options,
                                     covariance);
  const Eigen::Index theta_size = covariance.rows();
  // Wolfe optimizes over the latent 'a' space
  // Objective: -0.5 * a' * theta + log p(y | theta), with theta = Sigma * a.
  auto obj_fun = [&ll_fun, &ll_args, &msgs](const Eigen::VectorXd& a_val,
                                            auto&& theta_val) -> double {
    return -0.5 * a_val.dot(theta_val)
           + laplace_likelihood::log_likelihood(ll_fun, theta_val, ll_args,
                                                msgs);
  };
  auto theta_grad_f = [&ll_fun, &ll_args, &msgs](auto&& theta_val) {
    return laplace_likelihood::theta_grad(ll_fun, theta_val, ll_args, msgs);
  };
  decltype(auto) theta_init = theta_init_impl<InitTheta>(theta_size, options);
  // When the user supplies a non-zero theta_init, we must initialise a
  // consistently so that the invariant theta = Sigma * a holds. Otherwise
  // the prior term -0.5 * a'*theta vanishes (a=0 while theta!=0), inflating
  // the initial objective and causing the Wolfe line search to reject the
  // first Newton step.
  auto state
      = NewtonState(theta_size, obj_fun, theta_grad_f, covariance, theta_init);
  // Start with safe step size
  auto update_fun = create_update_fun(
      std::move(obj_fun), std::move(theta_grad_f), covariance, options);
  Eigen::Index step_iter = 0;
  try {
    if (options.solver == 1) {
      // Solver 1 has a cheaper specialization for diagonal Hessians.
      if (options.hessian_block_size == 1) {
        CholeskyWSolverDiag solver(state, covariance);
        return run_newton_loop(solver, state, options, step_iter, ll_fun,
                               ll_args, covariance, update_fun, msgs);
      } else {
        CholeskyWSolverBlock solver(state, options.hessian_block_size);
        return run_newton_loop(solver, state, options, step_iter, ll_fun,
                               ll_args, covariance, update_fun, msgs);
      }
    }
  } catch (const std::exception& e) {
    const std::string solver_type
        = (options.hessian_block_size == 1) ? "Diagonal" : "Block";
    std::string failed = "solver 1 (" + solver_type + " Hessian-root Cholesky)";
    // Warn (or throw, when fallthrough is disabled) exactly once.
    std::call_once(
        // NOTE(review): the std::once_flag argument (the thread_local
        // `fallback_warning`) was lost in this extraction.
        [](auto&&... args) {
          log_solver_fallback(std::forward<decltype(args)>(args)...);
        },
        options.allow_fallthrough, msgs, "laplace_marginal_density", step_iter,
        std::move(failed), "solver 2 (Covariance-root Cholesky)", e);
  }
  try {
    if (options.solver == 2 || options.allow_fallthrough) {
      CholeskyKSolver solver(state, covariance);
      return run_newton_loop(solver, state, options, step_iter, ll_fun, ll_args,
                             covariance, update_fun, msgs);
    }
  } catch (const std::exception& e) {
    std::call_once(
        // NOTE(review): the std::once_flag argument was lost in this
        // extraction.
        [](auto&&... args) {
          log_solver_fallback(std::forward<decltype(args)>(args)...);
        },
        options.allow_fallthrough, msgs, "laplace_marginal_density", step_iter,
        "solver 2 (Covariance-root Cholesky)", "solver 3 (General LU solver)",
        e);
  }
  // Solver 3 is both user-selectable and the last fallback.
  if (options.solver == 3 || options.allow_fallthrough) {
    LUSolver solver;
    return run_newton_loop(solver, state, options, step_iter, ll_fun, ll_args,
                           covariance, update_fun, msgs);
  }
  throw std::domain_error(
      std::string("You chose a solver (") + std::to_string(options.solver)
      + ") that is not valid. Please choose either 1, 2, or 3.");
}
1251} // namespace internal
1252} // namespace math
1253} // namespace stan
1254#endif
#define STAN_THREADS_DEF
#define unlikely(x)
int64_t size(const T &m)
Returns the size (number of the elements) of a matrix_cl or var_value<matrix_cl<T>>.
Definition size.hpp:19
(Expert) Numerical traits for algorithmic differentiation variables.
WolfeStatus wolfe_line_search(Info &wolfe_info, UpdateFun &&update_fun, Options &&opt, Stream *msgs)
Strong Wolfe line search for maximization.
auto create_update_fun(ObjFun &&obj_fun, ThetaGradFun &&theta_grad_f, Covariance &&covariance, Options &&options)
Create the update function for the line search, capturing necessary references.
auto run_newton_loop(SolverPolicy &solver, NewtonStateT &state, const OptionsT &options, Eigen::Index &step_iter, const LLFunT &ll_fun, const LLTupleArgsT &ll_args, const CovarMatT &covariance, UpdateFun &&update_fun, std::ostream *msgs)
Run a Newton loop with a solver policy, updating the shared state.
double barzilai_borwein_step_size(const Eigen::VectorXd &s, const Eigen::VectorXd &g_curr, const Eigen::VectorXd &g_prev, double prev_step, int last_backtracks, double min_alpha, double max_alpha)
Curvature-aware Barzilai–Borwein (BB) step length with robust safeguards.
void log_solver_fallback(const bool allow_fallthrough, std::ostream *msgs, std::string_view context, Eigen::Index iter, std::string_view failed_solver, std::string_view next_solver, const std::exception &e)
Log a solver fallback event to the provided stream.
decltype(auto) theta_init_impl(Eigen::Index theta_size, Opts &&options)
auto check_armijo(double obj_next, double obj_init, double alpha_next, double dir0, Option &&opt)
constexpr double laplace_default_tolerance
constexpr auto tuple_to_laplace_options(Options &&ops)
constexpr int laplace_default_max_steps_line_search
constexpr int laplace_default_hessian_block_size
void validate_laplace_options(const char *frame_name, const laplace_options< InitTheta > &options, const CovarMat &covariance)
Validates the options for the Laplace approximation.
constexpr int laplace_default_allow_fallthrough
auto retry_evaluate(Update &&update, Proposal &&proposal, Curr &&curr, Prev &&prev, Eval &eval, P &&p, Backoff &&backoff)
Retry evaluation of a step until it passes a validity check.
auto laplace_marginal_density_est(LLFun &&ll_fun, LLTupleArgs &&ll_args, CovarMat &&covariance, const laplace_options< InitTheta > &options, std::ostream *msgs)
For a latent Gaussian model with hyperparameters phi and latent variables theta, and observations y,...
void llt_with_jitter(LLT &llt_B, B_t &B, double min_jitter=1e-10, double max_jitter=1e-5)
Factorize B with jittering fallback.
void block_matrix_sqrt(WRootMat &W_root, const Eigen::SparseMatrix< double > &W, const Eigen::Index block_size)
Returns the principal square root of a block diagonal matrix.
static thread_local std::once_flag fallback_warning
auto diagonal_hessian(F &&f, Theta &&theta, TupleArgs &&ll_tuple, Stream *msgs)
auto log_likelihood(F &&f, Theta &&theta, TupleArgs &&ll_tup, Stream *msgs)
A wrapper that accepts a tuple as arguments.
auto block_hessian(F &&f, Theta &&theta, const Eigen::Index hessian_block_size, TupleArgs &&ll_tuple, Stream *msgs)
auto theta_grad(F &&f, Theta &&theta, TupleArgs &&ll_tup, Stream *msgs=nullptr)
A wrapper that accepts a tuple as arguments.
void check_square(const char *function, const char *name, const T_y &y)
Check if the specified matrix is square.
void check_nonnegative(const char *function, const char *name, const T_y &y)
Check if y is non-negative.
static constexpr double e()
Return the base of the natural logarithm.
Definition constants.hpp:20
T eval(T &&arg)
Inputs which have a plain_type equal to their own type are forwarded unmodified (for Eigen expressions ...
Definition eval.hpp:20
T value_of(const fvar< T > &v)
Return the value of the specified variable.
Definition value_of.hpp:18
void check_finite(const char *function, const char *name, const T_y &y)
Check that all values in y are finite; throws otherwise.
void check_nonzero_size(const char *function, const char *name, const T_y &y)
Check if the specified matrix/vector is of non-zero size.
void check_positive(const char *function, const char *name, const T_y &y)
Check if y is positive.
double dot(const std::vector< double > &x, const std::vector< double > &y)
Definition dot.hpp:11
constexpr bool is_inner_tuple_type_v
Checks if the N-th element of a tuple is of the same type as CheckType.
Definition is_tuple.hpp:82
std::enable_if_t< Check::value > require_t
If condition is true, template is enabled.
The lgamma implementation in stan-math is based on either the reentrant safe lgamma_r implementation ...
STL namespace.
void solve_step(NewtonStateT &state, const LLFun &ll_fun, const LLTupleArgs &ll_args, const CovarMat &covariance, int hessian_block_size, std::ostream *msgs)
Perform one Newton step using covariance Cholesky solver.
Eigen::MatrixXd K_root
Lower Cholesky factor of covariance: Sigma = K_root * K_root^T.
Eigen::LLT< Eigen::MatrixXd > llt_B
Cholesky factorization of B = I + K_root^T * W * K_root.
Eigen::SparseMatrix< double > W_full
Full (block) Hessian matrix from likelihood.
double compute_log_determinant() const
Compute log determinant of B from Cholesky factor.
auto build_result(NewtonStateT &state, double log_det)
Build the final result structure.
CholeskyKSolver(const NewtonStateT &state, const CovarMat &covariance)
Solver Policy 2: Cholesky decomposition of K (Covariance).
Eigen::LLT< Eigen::MatrixXd > llt_B
Cholesky factorization of B = I + W_r * Sigma * W_r.
Eigen::SparseMatrix< double > W_block
Sparse block-diagonal Hessian from likelihood.
Eigen::SparseMatrix< double > W_r
Sparse square root of block Hessian.
double compute_log_determinant() const
Compute log determinant of B from Cholesky factor.
void solve_step(NewtonStateT &state, const LLFun &ll_fun, const LLTupleArgs &ll_args, const CovarMat &covariance, int hessian_block_size, std::ostream *msgs)
Perform one Newton step using block-diagonal Hessian solver.
auto build_result(NewtonStateT &state, double log_det)
Build the final result structure.
CholeskyWSolverBlock(const NewtonStateT &state, int hessian_block_size)
Solver Policy 1 (Block): Cholesky decomposition using block W.
void solve_step(NewtonStateT &state, const LLFun &ll_fun, const LLTupleArgs &ll_args, const CovarMat &covariance, int, std::ostream *msgs)
Perform one Newton step using diagonal Hessian solver.
Eigen::LLT< Eigen::MatrixXd > llt_B
Cholesky factorization of B = I + W_r * Sigma * W_r.
CholeskyWSolverDiag(const NewtonStateT &state, const CovarMat &covariance)
Eigen::VectorXd W_r_diag
Square root of diagonal Hessian: W_r[j] = sqrt(W[j])
auto build_result(NewtonStateT &state, double log_det)
Build the final result structure.
Eigen::VectorXd W_diag
Diagonal Hessian values from the likelihood.
double compute_log_determinant() const
Compute log determinant of B from Cholesky factor.
Solver Policy 1 (Diagonal): Cholesky decomposition using W.
auto build_result(NewtonStateT &state, double log_det)
Build the final result structure.
void solve_step(NewtonStateT &state, const LLFun &ll_fun, const LLTupleArgs &ll_args, const CovarMat &covariance, int hessian_block_size, std::ostream *msgs)
Perform one Newton step using LU decomposition solver.
double compute_log_determinant() const
Compute log determinant from LU factorization.
Eigen::SparseMatrix< double > W_full
Full Hessian matrix from likelihood.
Eigen::PartialPivLU< Eigen::MatrixXd > lu
LU factorization of B = I + Sigma * W.
WolfeData proposal
Cached proposal evaluated before the Wolfe line search.
auto & prev() &
Access the previous step state (mutable).
Eigen::MatrixXd B
Workspace matrix: B = I + W_r * Sigma * W_r (or similar)
WolfeStatus wolfe_status
Status of the most recent Wolfe line search.
auto & curr() &
Access the current step state (mutable).
Eigen::VectorXd b
Workspace vector: b = W * theta + grad(log_lik)
WolfeInfo wolfe_info
Wolfe line search state including current/previous steps.
const auto & curr() const &
Access the current step state (const).
NewtonState(int theta_size, ObjFun &&obj_fun, ThetaGradFun &&theta_grad_f, CovarianceT &&covariance, ThetaInitializer &&theta_init)
Constructs Newton state with a consistent (a_init, theta_init) pair.
Eigen::VectorXd prev_g
Previous gradient for Barzilai-Borwein step calculation.
bool final_loop
On the final loop if we found a better wolfe step, but we are going to exit, we want to make sure all...
const auto & prev() const &
Access the previous step state (const).
Holds the state for the Newton-Raphson optimization loop.
Data used in current evaluation of wolfe line search at a particular stepsize.
Data object used in wolfe line search.
Struct to hold the result status of the Wolfe line search.
L_t L
Solver-dependent factorization of the system matrix B.
KRoot K_root
Lower Cholesky factor of the covariance matrix.
A_vec a
Mode in the a parameterization, where theta = covariance * a.
ThetaGrad theta_grad
Gradient of the log-likelihood with respect to theta at the mode.
laplace_density_estimates(double lmd_, ThetaVec &&theta_, WR &&W_r_, L_t &&L_, A_vec &&a_, ThetaGrad &&theta_grad_, LU_t &&LU_, KRoot &&K_root_, int solver_used_)
Options for Wolfe line search during optimization.
laplace_options(ThetaVec &&theta_0_, double tolerance_, int max_num_steps_, int hessian_block_size_, int solver_, int max_steps_line_search_, bool allow_fallthrough_)
double tolerance
Iterations end when the absolute change in the optimization objective is less than this tolerance.
int solver
Which linear solver to use inside the Newton step.
laplace_options_base(int hessian_block_size_, int solver_, double tolerance_, int max_num_steps_, bool allow_fallthrough_, int max_steps_line_search_)
Options for the Laplace approximation.