1#ifndef STAN_MATH_MIX_FUNCTOR_LAPLACE_MARGINAL_DENSITY_HPP
2#define STAN_MATH_MIX_FUNCTOR_LAPLACE_MARGINAL_DENSITY_HPP
13#include <unsupported/Eigen/MatrixFunctions>
54 typename LLFun,
typename LLTupleArgs,
typename CovarFun,
typename CovarArgs,
56 require_t<is_all_arithmetic_scalar<CovarArgs, LLTupleArgs>>* =
nullptr>
58 CovarFun&& covariance_function,
59 CovarArgs&& covar_args,
63 [msgs, &covariance_function](
auto&&... args) {
64 return covariance_function(std::forward<
decltype(args)>(args)..., msgs);
66 std::forward<CovarArgs>(covar_args));
68 std::forward<LLFun>(ll_fun), std::forward<LLTupleArgs>(ll_args),
69 std::move(covariance), options, msgs)
75template <
bool ZeroInput =
false,
typename Output,
typename Input,
76 require_tuple_t<Output>* =
nullptr, require_tuple_t<Input>* =
nullptr,
78 std::tuple_size_v<std::decay_t<Output>> == 0>>* =
nullptr,
80 std::tuple_size_v<std::decay_t<Input>> == 0>>* =
nullptr>
91template <
bool ZeroInput =
false,
typename Output,
typename Input,
95 if constexpr (is_tuple_v<Output> && is_tuple_v<Input>) {
97 std::tuple_size<std::decay_t<Output>>::value
98 == std::tuple_size<std::decay_t<Input>>::value,
99 "INTERNAL ERROR:(laplace_marginal_lpdf) copy_compute_s2 called on "
100 "tuples of different sizes. This is an internal error, please report "
102 "https://github.com/stan-dev/math/issues");
105 [](
auto&& output_i,
auto&& input_i) {
106 using output_i_t = std::decay_t<
decltype(output_i)>;
107 if constexpr (is_std_vector_v<output_i_t>) {
108 using dbl_map_t = Eigen::Map<Eigen::Matrix<double, -1, 1>>;
109 using var_map_t = Eigen::Map<Eigen::Matrix<
var, -1, 1>>;
110 var_map_t input_map(input_i.data(), input_i.size());
111 dbl_map_t(output_i.data(), output_i.size()).array()
112 += 0.5 * input_map.adj().array();
113 if constexpr (ZeroInput) {
114 input_map.adj().setZero();
116 }
else if constexpr (is_eigen_v<output_i_t>) {
117 output_i.array() += 0.5 * input_i.adj().array();
118 if constexpr (ZeroInput) {
119 input_i.adj().setZero();
121 }
else if constexpr (is_stan_scalar_v<output_i_t>) {
122 output_i += (0.5 * input_i.adj());
123 if constexpr (ZeroInput) {
128 sizeof(std::decay_t<output_i_t>*) == 0,
129 "INTERNAL ERROR:(laplace_marginal_lpdf) copy_compute_s2 was "
130 "not able to deduce the actions needed for the given type. "
131 "This is an internal error, please report it: "
132 "https://github.com/stan-dev/math/issues");
135 std::forward<Output>(output), std::forward<Input>(input));
148template <
typename Output,
typename Input>
151 if constexpr (is_tuple_v<Output>) {
153 [ret](
auto&& inner_arg,
auto&& inner_input)
mutable {
155 ret, std::forward<
decltype(inner_arg)>(inner_arg),
156 std::forward<
decltype(inner_input)>(inner_input));
158 std::forward<Output>(output), std::forward<Input>(input));
159 }
else if constexpr (is_std_vector_containing_tuple_v<Output>) {
160 for (std::size_t i = 0; i < output.size(); ++i) {
165 [vi = ret.vi_, arg_arena =
to_arena(std::forward<Output>(output)),
166 input_arena =
to_arena(std::forward<Input>(input))]()
mutable {
201template <
typename LLFun,
typename LLTupleArgs,
typename CovarFun,
202 typename CovarArgs,
bool InitTheta,
205 CovarFun&& covariance_function,
206 CovarArgs&& covar_args,
207 const laplace_options<InitTheta>& options,
208 std::ostream* msgs) {
209 auto covar_args_refs =
to_ref(std::forward<CovarArgs>(covar_args));
210 auto ll_args_refs =
to_ref(std::forward<LLTupleArgs>(ll_args));
217 nested_rev_autodiff nested;
219 auto ll_args_copy = internal::deep_copy_vargs<var>(ll_args_refs);
220 auto covar_args_copy = internal::deep_copy_vargs<var>(covar_args_refs);
222 [&covariance_function, &msgs](
auto&&... args) {
224 return to_var_value(covariance_function(args..., msgs));
226 return covariance_function(args..., msgs);
230 decltype(
auto) covariance_val =
value_of(covariance);
231 decltype(
auto) ll_args_vals =
value_of(ll_args_copy);
233 ll_fun, ll_args_vals, covariance_val, options, msgs);
237 const bool solver_1_or_2
238 = md_est.solver_used == 1 || md_est.solver_used == 2;
239 arena_t<Eigen::MatrixXd> R(md_est.theta.size() * solver_1_or_2,
240 md_est.theta.size() * solver_1_or_2);
242 arena_t<Eigen::MatrixXd> LU_solve_covariance(
243 covariance.rows() * (md_est.solver_used == 3),
244 covariance.cols() * (md_est.solver_used == 3));
246 arena_t<Eigen::VectorXd> s2(md_est.theta.size());
248 if (md_est.solver_used == 1) {
249 if (options.hessian_block_size == 1) {
250 arena_t<Eigen::MatrixXd> tmp = md_est.W_r.toDense();
251 md_est.L.template triangularView<Eigen::Lower>().solveInPlace(tmp);
252 R.noalias() = tmp.transpose() * tmp;
253 arena_t<Eigen::MatrixXd> C
254 = md_est.L.template triangularView<Eigen::Lower>().solve(
255 md_est.W_r * covariance_val);
256 if constexpr (ll_args_contain_var) {
257 arena_t<Eigen::MatrixXd> A = covariance_val - C.transpose() * C;
259 ll_fun, md_est.theta, A, options.hessian_block_size, ll_args_copy,
261 s2.deep_copy(s2_tmp);
262 internal::copy_compute_s2<ZeroOut>(partial_parm, ll_args_filter);
266 * (covariance_val.diagonal() - (C.transpose() * C).diagonal())
268 ll_fun, md_est.theta, ll_args_vals, msgs))));
272 arena_t<Eigen::MatrixXd> tmp = md_est.W_r.toDense();
273 md_est.L.template triangularView<Eigen::Lower>().solveInPlace(tmp);
274 R.noalias() = tmp.transpose() * tmp;
275 arena_t<Eigen::MatrixXd> C
276 = md_est.L.template triangularView<Eigen::Lower>().solve(
277 md_est.W_r * covariance_val);
278 arena_t<Eigen::MatrixXd> A = covariance_val - C.transpose() * C;
280 options.hessian_block_size,
282 s2.deep_copy(s2_tmp);
283 internal::copy_compute_s2<ZeroOut>(partial_parm, ll_args_filter);
285 }
else if (md_est.solver_used == 2) {
287 - md_est.W_r * md_est.K_root
288 * md_est.L.transpose()
289 .template triangularView<Eigen::Upper>()
291 md_est.L.template triangularView<Eigen::Lower>()
292 .solve(md_est.K_root.transpose() * md_est.W_r));
294 arena_t<Eigen::MatrixXd> C
295 = md_est.L.template triangularView<Eigen::Lower>().solve(
296 md_est.K_root.transpose());
298 ll_fun, md_est.theta, (C.transpose() * C).eval(),
299 options.hessian_block_size, ll_args_copy, msgs);
300 s2.deep_copy(s2_tmp);
301 internal::copy_compute_s2<ZeroOut>(partial_parm, ll_args_filter);
303 LU_solve_covariance = md_est.LU.solve(covariance_val);
305 = Eigen::MatrixXd::Identity(md_est.W_r.rows(), md_est.W_r.cols())
306 - LU_solve_covariance * md_est.W_r;
307 R = md_est.W_r * I_minus_BinvKW;
308 arena_t<Eigen::MatrixXd> A
309 = covariance_val - covariance_val * md_est.W_r * LU_solve_covariance;
311 options.hessian_block_size,
313 s2.deep_copy(s2_tmp);
314 internal::copy_compute_s2<ZeroOut>(partial_parm, ll_args_filter);
316 if constexpr (is_any_var_scalar_v<scalar_type_t<CovarArgs>>) {
317 arena_t<Eigen::MatrixXd> K_adj_arena
318 = 0.5 * md_est.a * md_est.a.transpose() - 0.5 * R
319 + s2 * md_est.theta_grad.transpose()
320 - (R * (covariance.val() * s2)) * md_est.theta_grad.transpose();
322 0.0, [covariance, K_adj_arena](
auto&& vi)
mutable {
323 covariance.adj().array() += vi.adj() * K_adj_arena.array();
326 auto covar_args_filter
330 if constexpr (ll_args_contain_var) {
332 internal::collect_adjoints<ZeroOut>(partial_parm, ll_args_filter);
333 arena_t<Eigen::VectorXd> v;
334 if (md_est.solver_used == 1 || md_est.solver_used == 2) {
335 v = covariance_val * s2 - covariance_val * R * covariance_val * s2;
337 v = LU_solve_covariance * s2;
341 internal::collect_adjoints<ZeroOut>(partial_parm, ll_args_filter);
346 if constexpr (is_any_var_scalar_v<CovarArgs>) {
349 std::move(covar_args_adj));
351 if constexpr (ll_args_contain_var) {
354 std::move(partial_parm));
Reference for calculations of the marginal and its gradients: Margossian et al (2020),...
constexpr decltype(auto) filter_var_scalar_types(T &&t)
Filter a tuple and return a tuple with references to the types with a var scalar type.
void reverse_pass_collect_adjoints(var ret, Output &&output, Input &&input)
Collects adjoints from a tuple or std::vector of tuples.
constexpr auto make_zeroed_arena(Input &&input)
Creates an arena type that is the same type as the input and initialized with zeros.
auto laplace_marginal_density_est(LLFun &&ll_fun, LLTupleArgs &&ll_args, CovarMat &&covariance, const laplace_options< InitTheta > &options, std::ostream *msgs)
For a latent Gaussian model with hyperparameters phi and latent variables theta, and observations y,...
void collect_adjoints(Output &output, Input &&input)
Collect the adjoints from the input and add them to the output.
constexpr void copy_compute_s2(Output &&output, Input &&input)
auto compute_s2(F &&f, Theta &&theta, AMat &&A, int hessian_block_size, TupleArgs &&ll_args, Stream *msgs)
A wrapper that accepts a tuple as arguments.
auto diff_eta_implicit(F &&f, V_t &&v, Theta &&theta, TupleArgs &&ll_args, Stream *msgs)
A wrapper that accepts a tuple as arguments.
auto ll_arg_grad(F &&f, Theta &&theta, TupleArgs &&ll_tup, Stream *msgs=nullptr)
Eigen::VectorXd third_diff(F &&f, Theta &&theta, TupleArgs &&ll_args, Stream *msgs)
A wrapper that accepts a tuple as arguments.
void iter_tuple_nested(F &&f, Types &&... args)
Iterate and nest into a tuple or std::vector to apply f to each matrix or scalar type.
var_value< plain_type_t< T > > make_callback_var(T &&value, F &&functor)
Creates a new var initialized with a callback_vari with a given value and reverse-pass callback funct...
void reverse_pass_callback(F &&functor)
Puts a callback on the autodiff stack to be called in the reverse pass.
constexpr void for_each(F &&f, const std::tuple<> &)
Apply a function to each element of a tuple.
T value_of(const fvar< T > &v)
Return the value of the specified variable.
auto laplace_marginal_density(LLFun &&ll_fun, LLTupleArgs &&ll_args, CovarFun &&covariance_function, CovarArgs &&covar_args, const laplace_options< InitTheta > &options, std::ostream *msgs)
For a latent Gaussian model with global parameters phi, latent variables theta, and observations y,...
arena_t< T > to_arena(const T &a)
Converts the given argument into a type that either has any dynamic allocation on the AD stack or schedules i...
var_value< Eigen::Matrix< double, T::RowsAtCompileTime, T::ColsAtCompileTime > > to_var_value(const T &a)
Converts an Eigen matrix (or vector or row_vector) or expression of vars into var_value.
ref_type_t< T && > to_ref(T &&a)
This evaluates expensive Eigen expressions.
static void grad()
Compute the gradient for all variables starting from the end of the AD tape.
constexpr decltype(auto) apply(F &&f, Tuple &&t, PreArgs &&... pre_args)
constexpr bool is_any_var_scalar_v
std::enable_if_t< Check::value > require_t
If the condition is true, the template is enabled.
The lgamma implementation in stan-math is based on either the reentrant safe lgamma_r implementation ...