Automatic Differentiation
 
Loading...
Searching...
No Matches
gradient.hpp
Go to the documentation of this file.
1#ifndef STAN_MATH_REV_FUNCTOR_GRADIENT_HPP
2#define STAN_MATH_REV_FUNCTOR_GRADIENT_HPP
3
#include <sstream>
#include <stdexcept>
#include <vector>
9
10namespace stan {
11namespace math {
12
45template <typename F>
46void gradient(const F& f, const Eigen::Matrix<double, Eigen::Dynamic, 1>& x,
47 double& fx, Eigen::Matrix<double, Eigen::Dynamic, 1>& grad_fx) {
49
50 Eigen::Matrix<var, Eigen::Dynamic, 1> x_var(x);
51 var fx_var = f(x_var);
52 fx = fx_var.val();
53 grad_fx.resize(x.size());
54 grad(fx_var.vi_);
55 grad_fx = x_var.adj();
56}
57
100template <typename F, typename EigVec, typename InputIt,
102void gradient(const F& f, const EigVec& x, double& fx, InputIt first_grad_fx,
103 InputIt last_grad_fx) {
104 nested_rev_autodiff nested;
105
106 if (last_grad_fx - first_grad_fx != x.size()) {
107 std::stringstream s;
108 s << "gradient(): iterator and gradient different sizes; iterator size = "
109 << last_grad_fx - first_grad_fx << "; grad size = " << x.size()
110 << std::endl;
111 throw std::invalid_argument(s.str());
112 }
113
114 Eigen::Matrix<var, Eigen::Dynamic, 1> x_var(x);
115 var fx_var = f(x_var);
116 fx = fx_var.val();
117 grad(fx_var.vi_);
118 for (Eigen::VectorXd::Index i = 0; i < x_var.size(); ++i) {
119 *first_grad_fx++ = x_var.coeff(i).adj();
120 }
121}
122
123} // namespace math
124} // namespace stan
125#endif
A class following the RAII idiom to start and recover nested autodiff scopes.
require_t< container_type_check_base< is_eigen_vector, value_type_t, TypeCheck, Check... > > require_eigen_vector_vt
Require type satisfies is_eigen_vector.
void gradient(const F &f, const Eigen::Matrix< T, Eigen::Dynamic, 1 > &x, T &fx, Eigen::Matrix< T, Eigen::Dynamic, 1 > &grad_fx)
Calculate the value and the gradient of the specified function at the specified argument.
Definition gradient.hpp:40
static void grad()
Compute the gradient for all variables starting from the end of the AD tape.
Definition grad.hpp:26
The lgamma implementation in stan-math is based on either the reentrant safe lgamma_r implementation ...