#ifndef STAN_MATH_MIX_FUNCTOR_GRAD_TR_MAT_TIMES_HESSIAN_HPP
#define STAN_MATH_MIX_FUNCTOR_GRAD_TR_MAT_TIMES_HESSIAN_HPP

#include <stan/math/mix/functor/gradient_dot_vector.hpp>
#include <stan/math/fwd/core.hpp>
#include <stan/math/rev/core.hpp>
#include <stan/math/prim/fun/Eigen.hpp>

namespace stan {
namespace math {

// Computes the gradient of tr(M * H(x)), where H(x) is the Hessian of f
// at x, using forward-over-reverse nested automatic differentiation.
template <typename F>
void grad_tr_mat_times_hessian(
    const F& f, const Eigen::Matrix<double, Eigen::Dynamic, 1>& x,
    const Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>& M,
    Eigen::Matrix<double, Eigen::Dynamic, 1>& grad_tr_MH) {
  using Eigen::Dynamic;
  using Eigen::Matrix;

  // RAII guard: starts a nested autodiff scope and recovers it on exit
  nested_rev_autodiff nested;

  grad_tr_MH.resize(x.size());

  // promote the input to reverse-mode autodiff variables
  Matrix<var, Dynamic, 1> x_var(x.size());
  for (int i = 0; i < x.size(); ++i) {
    x_var(i) = x(i);
  }

  var sum(0.0);

  Matrix<fvar<var>, Dynamic, 1> x_fvar(x.size());
  Matrix<double, Dynamic, 1> M_n(x.size());
  for (int n = 0; n < x.size(); ++n) {
    // copy the n-th row of M
    for (int k = 0; k < x.size(); ++k) {
      M_n(k) = M(n, k);
    }
    // set the forward-mode tangent to the n-th unit vector
    for (int k = 0; k < x.size(); ++k) {
      x_fvar(k) = fvar<var>(x_var(k), k == n);
    }
    fvar<var> fx;
    fvar<var> grad_fx_dot_v;
    gradient_dot_vector<fvar<var>, double>(f, x_fvar, M_n, fx,
                                           grad_fx_dot_v);
    // the tangent of grad(f) . M_n is the n-th diagonal entry of M * H
    sum += grad_fx_dot_v.d_;
  }

  // reverse pass: propagate from the accumulated trace back to x
  grad(sum.vi_);
  for (int i = 0; i < x.size(); ++i) {
    grad_tr_MH(i) = x_var(i).adj();
  }
}

}  // namespace math
}  // namespace stan
#endif
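// Example usage (a minimal sketch, not part of the Stan Math sources; the
// functor name below is hypothetical). For f(x) = x' * x the Hessian is
// 2 * I, so tr(M * H) = 2 * tr(M) is constant in x and the gradient is the
// zero vector:
//
//   #include <stan/math/mix.hpp>
//
//   struct dot_self_functor {
//     template <typename T>
//     T operator()(const Eigen::Matrix<T, Eigen::Dynamic, 1>& x) const {
//       return stan::math::dot_self(x);  // x' * x
//     }
//   };
//
//   int main() {
//     Eigen::VectorXd x = Eigen::VectorXd::Ones(3);
//     Eigen::MatrixXd M = Eigen::MatrixXd::Identity(3, 3);
//     Eigen::VectorXd grad_tr_MH;
//     stan::math::grad_tr_mat_times_hessian(dot_self_functor(), x, M,
//                                           grad_tr_MH);
//     // grad_tr_MH is now the zero vector of length 3
//   }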