Automatic Differentiation
 
Loading...
Searching...
No Matches
gradient.hpp
Go to the documentation of this file.
1#ifndef STAN_MATH_REV_FUNCTOR_GRADIENT_HPP
2#define STAN_MATH_REV_FUNCTOR_GRADIENT_HPP
3
#include <sstream>
#include <stdexcept>
#include <vector>
9
10namespace stan {
11namespace math {
12
45template <typename F>
46inline void gradient(const F& f,
47 const Eigen::Matrix<double, Eigen::Dynamic, 1>& x,
48 double& fx,
49 Eigen::Matrix<double, Eigen::Dynamic, 1>& grad_fx) {
51
52 Eigen::Matrix<var, Eigen::Dynamic, 1> x_var(x);
53 var fx_var = f(x_var);
54 fx = fx_var.val();
55 grad_fx.resize(x.size());
56 grad(fx_var.vi_);
57 grad_fx = x_var.adj();
58}
59
102template <typename F, typename EigVec, typename InputIt,
104inline void gradient(const F& f, const EigVec& x, double& fx,
105 InputIt first_grad_fx, InputIt last_grad_fx) {
106 nested_rev_autodiff nested;
107
108 if (last_grad_fx - first_grad_fx != x.size()) {
109 std::stringstream s;
110 s << "gradient(): iterator and gradient different sizes; iterator size = "
111 << last_grad_fx - first_grad_fx << "; grad size = " << x.size()
112 << std::endl;
113 throw std::invalid_argument(s.str());
114 }
115
116 Eigen::Matrix<var, Eigen::Dynamic, 1> x_var(x);
117 var fx_var = f(x_var);
118 fx = fx_var.val();
119 grad(fx_var.vi_);
120 for (Eigen::VectorXd::Index i = 0; i < x_var.size(); ++i) {
121 *first_grad_fx++ = x_var.coeff(i).adj();
122 }
123}
124
125} // namespace math
126} // namespace stan
127#endif
A class following the RAII idiom to start and recover nested autodiff scopes.
require_t< container_type_check_base< is_eigen_vector, value_type_t, TypeCheck, Check... > > require_eigen_vector_vt
Require type satisfies is_eigen_vector.
void gradient(const F &f, const Eigen::Matrix< T, Eigen::Dynamic, 1 > &x, T &fx, Eigen::Matrix< T, Eigen::Dynamic, 1 > &grad_fx)
Calculate the value and the gradient of the specified function at the specified argument.
Definition gradient.hpp:40
static void grad()
Compute the gradient for all variables starting from the end of the AD tape.
Definition grad.hpp:26
The lgamma implementation in stan-math is based on either the reentrant safe lgamma_r implementation ...