1#ifndef STAN_MATH_MIX_FUNCTOR_LAPLACE_LIKELIHOOD_HPP
2#define STAN_MATH_MIX_FUNCTOR_LAPLACE_LIKELIHOOD_HPP
15namespace laplace_likelihood {
// Evaluate the user-supplied log-likelihood functor `f` at the latent vector
// `theta`: the call forwards `theta`, then the extra data `args...`, and the
// output stream pointer `msgs` LAST (per the index:
// `auto log_likelihood(F&&, Theta&&, Stream*, Args&&...)`).
// NOTE(review): this listing is extraction-garbled — the original file's line
// numbers (27, 29, 30, 31) are fused into the code, and interior lines
// (the template-parameter-list terminator after the trailing comma and the
// closing brace) are missing. Do not treat this text as compilable source.
27template <
typename F,
typename Theta,
typename Stream,
typename... Args,
29inline auto log_likelihood(F&& f, Theta&& theta, Stream* msgs, Args&&... args) {
30 return std::forward<F>(f)(std::forward<Theta>(theta),
31 std::forward<Args>(args)..., msgs);
// Conditionally copy each argument and promote its scalar type to
// `PromotedType` (per the index: "Conditional copy and promote a type's
// scalar type to a PromotedType"); `CopyType` selects deep vs. shallow
// copying (see COPY_TYPE in the index). Dispatch is done through
// `map_if<Filter>` with a template-template `Filter` predicate:
//  - std::vector-like arguments are rebuilt element-by-element, recursing
//    into `conditional_copy_and_promote` for each element (the `arg.size()`
//    loop below);
//  - arguments whose scalar already is `PromotedType` are forwarded
//    unmodified (`if constexpr (std::is_same_v<PromotedType, ...`).
// NOTE(review): extraction-garbled — fused line numbers (52…92) and many
// missing interior lines (return-type computation, branch bodies, closing
// braces). Recover the full body from the upstream source before editing.
52template <
template <
typename...>
class Filter,
56 return map_if<Filter>(
60 [](
auto&&... inner_args) {
64 std::forward<
decltype(inner_args)>(inner_args))...);
66 std::forward<decltype(arg)>(
arg));
69 Filter, PromotedType, CopyType>(
arg[0]))>
71 for (std::size_t i = 0; i <
arg.size(); ++i) {
73 conditional_copy_and_promote<Filter, PromotedType, CopyType>(
82 if constexpr (std::is_same_v<PromotedType,
84 return std::forward<decltype(arg)>(
arg);
87 std::forward<
decltype(
arg)>(
arg)));
92 std::forward<Args>(args)...);
// deep_copy_vargs(args...): convenience wrapper that forwards `args...` —
// presumably into conditional_copy_and_promote with the deep-copy COPY_TYPE
// (TODO confirm against the upstream source; only the template header and the
// forwarding line survived extraction here).
95template <
typename PromotedType,
typename... Args>
99 std::forward<Args>(args)...);
// shallow_copy_vargs(args...): convenience wrapper that forwards `args...` —
// presumably into conditional_copy_and_promote with the shallow-copy
// COPY_TYPE (TODO confirm against the upstream source; only the template
// header and the forwarding line survived extraction here).
102template <
typename PromotedType,
typename... Args>
106 std::forward<Args>(args)...);
// Per the index: "Computes theta gradient and negative block diagonal Hessian
// of f wrt theta and args...". Returns a pair of
//  (1) `theta_gradient` — a lambda that promotes `theta` to reverse-mode
//      `var`, evaluates `f(theta_var, args..., msgs)`, and returns the
//      adjoints `theta_var.adj()` (the grad() call is among the lines lost
//      to extraction), and
//  (2) the NEGATED Hessian: for hessian_block_size == 1 a diagonal
//      Eigen::SparseMatrix<double> filled from the Hessian-vector product
//      `hessian_v` (one reserved entry per column), otherwise presumably
//      -hessian_block_diag(...) — TODO confirm; the general-case second pair
//      element (line 154+) is missing from this listing.
// NOTE(review): extraction-garbled — fused line numbers, missing lines 131,
// 134, 137, 139, 143–144, 149, 151, 154+; not compilable as shown.
126template <
typename F,
typename Theta,
typename Stream,
typename... Args,
128inline auto diff(F&& f, Theta&& theta,
const Eigen::Index hessian_block_size,
129 Stream* msgs, Args&&... args) {
130 using Eigen::Dynamic;
132 const Eigen::Index theta_size = theta.size();
133 auto theta_gradient = [&theta, &f, &msgs](
auto&&... args) {
135 Matrix<var, Dynamic, 1> theta_var = theta;
136 var f_var = f(theta_var, args..., msgs);
138 return theta_var.adj().eval();
140 if (hessian_block_size == 1) {
141 auto v = Eigen::VectorXd::Ones(theta_size);
142 Eigen::VectorXd hessian_v = Eigen::VectorXd::Zero(theta_size);
145 Eigen::SparseMatrix<double> hessian_theta(theta_size, theta_size);
146 hessian_theta.reserve(Eigen::VectorXi::Constant(theta_size, 1));
147 for (Eigen::Index i = 0; i < theta_size; i++) {
148 hessian_theta.insert(i, i) = hessian_v(i);
150 return std::make_pair(std::move(theta_gradient), (-hessian_theta).
eval());
152 return std::make_pair(
153 std::move(theta_gradient),
// Per the index: "Compute third order derivative of f wrt theta and
// args...". Forward-over-forward-over-reverse mode: `theta` is promoted to
// `var` and then wrapped in fvar<fvar<var>> (tangent seeding happens in the
// loop body lost to extraction, lines 182–184); grad() is run from the
// second-order tangent's vi_ (`.d_.d_.vi_`) and the accumulated adjoints
// `theta_var.adj()` are returned as an Eigen::VectorXd.
// NOTE(review): extraction-garbled — fused line numbers; the f-evaluation
// and tangent-seeding lines (176–177, 182–184) are missing; `ftheta_ffvar`
// at line 185 is presumably the fvar<fvar<var>> result of calling f — TODO
// confirm against upstream. Not compilable as shown.
173template <
typename F,
typename Theta,
typename Stream,
typename... Args,
175inline Eigen::VectorXd
third_diff(F&& f, Theta&& theta, Stream&& msgs,
178 const Eigen::Index theta_size = theta.size();
179 Eigen::Matrix<var, Eigen::Dynamic, 1> theta_var = std::forward<Theta>(theta);
180 Eigen::Matrix<fvar<fvar<var>>, Eigen::Dynamic, 1> theta_ffvar(theta_size);
181 for (Eigen::Index i = 0; i < theta_size; ++i) {
185 grad(ftheta_ffvar.
d_.d_.vi_);
186 return theta_var.adj().eval();
// Per the index: "The derivative of the log likelihood wrt theta evaluated at
// the mode." Iterates over the `hessian_block_size` directions of the block
// structure: for each direction i it builds a seed vector `v` (seeding loop
// body partially lost), gathers the matching block-diagonal column of `A`
// into `w`, promotes `theta` and the shallow-copied `args...` to
// fvar<fvar<var>>, evaluates f through std::apply with `msgs` passed last,
// and runs grad() from the second-order tangent (`.d_.d_.vi_`).
// Returns 0.5 * theta_var.adj() — the accumulated adjoints halved.
// NOTE(review): extraction-garbled — fused line numbers; missing lines
// include the signature body start (214), nested-autodiff setup, the
// theta_ffvar seeding statements (230–235, 243–245), and all closing braces.
// Not compilable as shown.
210template <
typename F,
typename Theta,
typename AMat,
typename Stream,
213 const int hessian_block_size, Stream* msgs,
215 using Eigen::Dynamic;
217 using Eigen::MatrixXd;
218 using Eigen::VectorXd;
221 const Eigen::Index theta_size = theta.size();
222 Matrix<var, Dynamic, 1> theta_var = std::forward<Theta>(theta);
223 int n_blocks = theta_size / hessian_block_size;
224 VectorXd v(theta_size);
225 VectorXd w(theta_size);
226 Matrix<fvar<fvar<var>>, Dynamic, 1> theta_ffvar(theta_size);
227 auto shallow_copy_args
228 = shallow_copy_vargs<fvar<fvar<var>>>(std::forward_as_tuple(args...));
229 for (Eigen::Index i = 0; i < hessian_block_size; ++i) {
232 for (
int j = i; j < theta_size; j += hessian_block_size) {
236 for (
int j = 0; j < n_blocks; ++j) {
237 for (
int k = 0; k < hessian_block_size; ++k) {
238 w(k + j * hessian_block_size)
239 = A(k + j * hessian_block_size, i + j * hessian_block_size);
242 for (
int j = 0; j < theta_size; ++j) {
246 [](
auto&& f,
auto&& theta_ffvar,
auto&& msgs,
auto&&... inner_args) {
247 return f(theta_ffvar, inner_args..., msgs);
249 shallow_copy_args, f, theta_ffvar, msgs);
250 grad(target_ffvar.
d_.d_.vi_);
252 return (0.5 * theta_var.adj()).eval();
// Presumably diff_eta_implicit (the signature line itself was lost to
// extraction; the template parameters <F, V_t, Theta, Stream, ...> match the
// index entry "auto diff_eta_implicit(F&&, V_t&&, Theta&&, Stream*,
// Args&&...) — Compute second order gradient of f wrt theta and args...").
// Visible logic: an `if constexpr (!contains_var)` early branch (body
// missing); otherwise `theta` is promoted to var and each component wrapped
// as fvar<var>(theta_var(i), v(i)) — a directional derivative seeded by `v` —
// the extra args are shallow-copied/promoted to fvar<var>, and f is invoked
// through std::apply with `msgs` passed last.
// NOTE(review): extraction-garbled — fused line numbers; the signature
// (275–277), the !contains_var branch body (283–285), and everything after
// line 298 are missing. Not compilable as shown.
274template <
typename F,
typename V_t,
typename Theta,
typename Stream,
278 using Eigen::Dynamic;
280 using Eigen::VectorXd;
282 if constexpr (!contains_var) {
286 const Eigen::Index theta_size = theta.size();
287 Matrix<var, Dynamic, 1> theta_var = std::forward<Theta>(theta);
288 Matrix<fvar<var>, Dynamic, 1> theta_fvar(theta_size);
289 for (Eigen::Index i = 0; i < theta_size; i++) {
290 theta_fvar(i) =
fvar<var>(theta_var(i), v(i));
292 auto shallow_copy_args
293 = shallow_copy_vargs<fvar<var>>(std::forward_as_tuple(args...));
295 [](
auto&& f,
auto&& theta_fvar,
auto&& msgs,
auto&&... inner_args) {
296 return f(theta_fvar, inner_args..., msgs);
298 shallow_copy_args, f, theta_fvar, msgs);
// Tuple overload of log_likelihood (per the index: "A wrapper that accepts a
// tuple as arguments"). Unpacks `ll_tup` via apply, pre-pending f, theta and
// msgs, and delegates each unpacked argument pack to the variadic
// log_likelihood overload.
// NOTE(review): extraction-garbled — fused line numbers; the signature line
// (~316–320), the apply(...) call head, and closing braces are missing.
// Not compilable as shown.
315template <
typename F,
typename Theta,
typename TupleArgs,
typename Stream,
321 [](
auto&& f,
auto&& theta,
auto&& msgs,
auto&&... args) {
323 std::forward<
decltype(f)>(f), std::forward<
decltype(theta)>(theta),
324 msgs, std::forward<
decltype(args)>(args)...);
326 std::forward<TupleArgs>(ll_tup), std::forward<F>(f),
327 std::forward<Theta>(theta), msgs);
// Tuple overload of diff (per the index: "A wrapper that accepts a tuple as
// arguments"). Unpacks `ll_tuple` via apply with f, theta,
// hessian_block_size and msgs passed ahead of the tuple contents, and
// delegates to the variadic diff overload.
// NOTE(review): extraction-garbled — fused line numbers; the apply(...) call
// head, the lambda's variadic parameter line, and closing braces are missing.
// Not compilable as shown.
343template <
typename F,
typename Theta,
typename TupleArgs,
typename Stream,
346inline auto diff(F&& f, Theta&& theta,
const Eigen::Index hessian_block_size,
347 TupleArgs&& ll_tuple, Stream* msgs) {
349 [](
auto&& f,
auto&& theta,
auto hessian_block_size,
auto* msgs,
352 std::forward<
decltype(f)>(f), std::forward<
decltype(theta)>(theta),
353 hessian_block_size, msgs, std::forward<
decltype(args)>(args)...);
355 std::forward<TupleArgs>(ll_tuple), std::forward<F>(f),
356 std::forward<Theta>(theta), hessian_block_size, msgs);
// Tuple overload of third_diff (per the index: "A wrapper that accepts a
// tuple as arguments"). Unpacks `ll_args` via apply, forwarding f, theta and
// msgs ahead of the tuple contents, and delegates to the variadic third_diff
// overload, returning its Eigen::VectorXd result.
// NOTE(review): extraction-garbled — fused line numbers; the end of the
// signature (~374–375), the apply(...) call head, and closing braces are
// missing. Not compilable as shown.
370template <
typename F,
typename Theta,
typename TupleArgs,
typename Stream,
373inline Eigen::VectorXd
third_diff(F&& f, Theta&& theta, TupleArgs&& ll_args,
376 [](
auto&& f,
auto&& theta,
auto&& msgs,
auto&&... args) {
378 std::forward<
decltype(theta)>(theta), msgs,
379 std::forward<
decltype(args)>(args)...);
381 std::forward<TupleArgs>(ll_args), std::forward<F>(f),
382 std::forward<Theta>(theta), msgs);
// Tuple overload of compute_s2 (per the index: "A wrapper that accepts a
// tuple as arguments"). Unpacks `ll_args` via apply with f, theta, A,
// hessian_block_size and msgs passed ahead of the tuple contents, and
// delegates to the variadic compute_s2 overload.
// NOTE(review): extraction-garbled — fused line numbers; the apply(...) call
// head, the lambda's variadic parameter line, the trailing `msgs);` of the
// final call (line 415), and closing braces are missing. Not compilable as
// shown.
400template <
typename F,
typename Theta,
typename AMat,
typename TupleArgs,
403inline auto compute_s2(F&& f, Theta&& theta, AMat&& A,
int hessian_block_size,
404 TupleArgs&& ll_args, Stream* msgs) {
406 [](
auto&& f,
auto&& theta,
auto&& A,
auto hessian_block_size,
auto* msgs,
409 std::forward<
decltype(f)>(f), std::forward<
decltype(theta)>(theta),
410 std::forward<
decltype(A)>(A), hessian_block_size, msgs,
411 std::forward<
decltype(args)>(args)...);
413 std::forward<TupleArgs>(ll_args), std::forward<F>(f),
414 std::forward<Theta>(theta), std::forward<AMat>(A), hessian_block_size,
// Tuple overload of diff_eta_implicit (per the index: "A wrapper that
// accepts a tuple as arguments"). Unpacks `ll_args` via apply with f, v,
// theta and msgs passed ahead of the tuple contents, and delegates to the
// variadic diff_eta_implicit overload.
// NOTE(review): extraction-garbled — fused line numbers; the end of the
// signature (~432–434), the apply(...) call head, and closing braces are
// missing. Not compilable as shown.
431template <
typename F,
typename V_t,
typename Theta,
typename TupleArgs,
435 TupleArgs&& ll_args, Stream* msgs) {
437 [](
auto&& f,
auto&& v,
auto&& theta,
auto&& msgs,
auto&&... args) {
439 std::forward<
decltype(f)>(f), std::forward<
decltype(v)>(v),
440 std::forward<
decltype(theta)>(theta), msgs,
441 std::forward<
decltype(args)>(args)...);
443 std::forward<TupleArgs>(ll_args), std::forward<F>(f),
444 std::forward<V_t>(v), std::forward<Theta>(theta), msgs);
A class following the RAII idiom to start and recover nested autodiff scopes.
require_t< is_eigen_vector< std::decay_t< T > > > require_eigen_vector_t
Require type satisfies is_eigen_vector.
require_t< container_type_check_base< is_eigen_vector, value_type_t, TypeCheck, Check... > > require_eigen_vector_vt
Require type satisfies is_eigen_vector.
require_t< is_tuple< std::decay_t< T > > > require_tuple_t
Require type satisfies is_tuple.
auto deep_copy_vargs(Args &&... args)
auto conditional_copy_and_promote(Args &&... args)
Conditional copy and promote a type's scalar type to a PromotedType.
Eigen::VectorXd third_diff(F &&f, Theta &&theta, Stream &&msgs, Args &&... args)
Compute third order derivative of f wrt theta and args...
auto diff_eta_implicit(F &&f, V_t &&v, Theta &&theta, Stream *msgs, Args &&... args)
Compute second order gradient of f wrt theta and args...
auto log_likelihood(F &&f, Theta &&theta, Stream *msgs, Args &&... args)
auto diff(F &&f, Theta &&theta, const Eigen::Index hessian_block_size, Stream *msgs, Args &&... args)
Computes theta gradient and negative block diagonal Hessian of f wrt theta and args....
auto compute_s2(F &&f, Theta &&theta, AMat &&A, const int hessian_block_size, Stream *msgs, Args &&... args)
The derivative of the log likelihood wrt theta evaluated at the mode.
auto shallow_copy_vargs(Args &&... args)
COPY_TYPE
Decide if object should be deep or shallow copied when using conditional_copy_and_promote.
auto compute_s2(F &&f, Theta &&theta, AMat &&A, int hessian_block_size, TupleArgs &&ll_args, Stream *msgs)
A wrapper that accepts a tuple as arguments.
auto diff(F &&f, Theta &&theta, const Eigen::Index hessian_block_size, TupleArgs &&ll_tuple, Stream *msgs)
A wrapper that accepts a tuple as arguments.
auto diff_eta_implicit(F &&f, V_t &&v, Theta &&theta, TupleArgs &&ll_args, Stream *msgs)
A wrapper that accepts a tuple as arguments.
auto log_likelihood(F &&f, Theta &&theta, TupleArgs &&ll_tup, Stream *msgs)
A wrapper that accepts a tuple as arguments.
Eigen::VectorXd third_diff(F &&f, Theta &&theta, TupleArgs &&ll_args, Stream *msgs)
A wrapper that accepts a tuple as arguments.
double value_of_rec(const fvar< T > &v)
Return the value of the specified variable.
Eigen::SparseMatrix< double > hessian_block_diag(F &&f, const Eigen::VectorXd &x, const Eigen::Index hessian_block_size, Args &&... args)
Returns a block diagonal Hessian by computing the relevant directional derivatives and storing them i...
fvar< T > arg(const std::complex< fvar< T > > &z)
Return the phase angle of the complex argument.
T eval(T &&arg)
Inputs which have a plain_type equal to their own type are forwarded unmodified (for Eigen expressions ...
constexpr auto make_holder_tuple(Types &&... args)
Holds ownership of rvalues and forwards lvalues into a tuple.
T value_of(const fvar< T > &v)
Return the value of the specified variable.
void hessian_times_vector(const F &f, const Eigen::Matrix< double, Eigen::Dynamic, 1 > &x, const Eigen::Matrix< double, Eigen::Dynamic, 1 > &v, double &fx, Eigen::Matrix< double, Eigen::Dynamic, 1 > &Hv)
constexpr bool is_tuple_v
static void grad()
Compute the gradient for all variables starting from the end of the AD tape.
constexpr decltype(auto) apply(F &&f, Tuple &&t, PreArgs &&... pre_args)
typename scalar_type< T >::type scalar_type_t
constexpr bool is_std_vector_v
The lgamma implementation in stan-math is based on either the reentrant safe lgamma_r implementation ...
Scalar d_
The tangent (derivative) of this variable.
This template class represents scalars used in forward-mode automatic differentiation,...