Automatic Differentiation — Stan Math
finite_diff.hpp
Documentation for this file (Doxygen-generated page; interactive chrome removed).
1#ifndef STAN_MATH_FWD_FUNCTOR_FINITE_DIFF_HPP
2#define STAN_MATH_FWD_FUNCTOR_FINITE_DIFF_HPP
3
// NOTE(review): the original #include block (source lines 4-9) was lost in
// extraction. <vector> is required for the std::vector usage below; the
// project headers (serializer, value_of, finite_diff_gradient_auto,
// apply_scalar_binary, fwd meta) must be restored from the upstream file.
#include <vector>
11namespace stan {
12namespace math {
13namespace internal {
25template <typename FuncTangent, typename InputArg,
27inline constexpr double aggregate_tangent(const FuncTangent& tangent,
28 const InputArg& arg) {
29 return 0;
30}
31
44template <typename FuncTangent, typename InputArg,
46inline auto aggregate_tangent(const FuncTangent& tangent, const InputArg& arg) {
48 tangent, arg, [](const auto& x, const auto& y) { return x * y.d_; }));
49}
50} // namespace internal
51
67template <typename F, typename... TArgs,
68 require_any_st_fvar<TArgs...>* = nullptr>
69inline auto finite_diff(const F& func, const TArgs&... args) {
70 using FvarT = return_type_t<TArgs...>;
71 using FvarInnerT = typename FvarT::Scalar;
72
73 std::vector<FvarInnerT> serialised_args
74 = serialize<FvarInnerT>(value_of(args)...);
75
76 auto serial_functor = [&](const auto& v) {
77 auto v_deserializer = to_deserializer(v);
78 return func(v_deserializer.read(args)...);
79 };
80
81 FvarInnerT rtn_value;
82 std::vector<FvarInnerT> grad;
83 finite_diff_gradient_auto(serial_functor, serialised_args, rtn_value, grad);
84
85 FvarInnerT rtn_grad = 0;
86 auto grad_deserializer = to_deserializer(grad);
87 // Use a fold-expression to aggregate tangents for input arguments
88 static_cast<void>(
89 std::initializer_list<int>{(rtn_grad += internal::aggregate_tangent(
90 grad_deserializer.read(args), args),
91 0)...});
92
93 return FvarT(rtn_value, rtn_grad);
94}
95
111template <typename F, typename... TArgs,
112 require_all_not_st_fvar<TArgs...>* = nullptr>
113inline auto finite_diff(const F& func, const TArgs&... args) {
114 return func(args...);
115}
116
117} // namespace math
118} // namespace stan
119
120#endif
require_all_not_t< is_fvar< scalar_type_t< std::decay_t< Types > > >... > require_all_not_st_fvar
Require none of the scalar types satisfy is_fvar.
Definition is_fvar.hpp:96
require_any_t< is_fvar< scalar_type_t< std::decay_t< Types > > >... > require_any_st_fvar
Require any of the scalar types satisfy is_fvar.
Definition is_fvar.hpp:89
require_t< is_fvar< scalar_type_t< std::decay_t< T > > > > require_st_fvar
Require scalar type satisfies is_fvar.
Definition is_fvar.hpp:76
require_not_t< is_fvar< scalar_type_t< std::decay_t< T > > > > require_not_st_fvar
Require scalar type does not satisfy is_fvar.
Definition is_fvar.hpp:82
typename return_type< Ts... >::type return_type_t
Convenience type for the return type of the specified template parameters.
constexpr double aggregate_tangent(const FuncTangent &tangent, const InputArg &arg)
Helper function for aggregating tangents if the respective input argument was an fvar<T> type.
void finite_diff_gradient_auto(const F &f, VectorT &&x, ScalarT &fx, GradVectorT &grad_fx)
Calculate the value and the gradient of the specified function at the specified argument using finite differences.
deserializer< T > to_deserializer(const std::vector< T > &vals)
Return a deserializer based on the specified values.
fvar< T > arg(const std::complex< fvar< T > > &z)
Return the phase angle of the complex argument.
Definition arg.hpp:19
T value_of(const fvar< T > &v)
Return the value of the specified variable.
Definition value_of.hpp:18
auto sum(const std::vector< T > &m)
Return the sum of the entries of the specified standard vector.
Definition sum.hpp:23
auto apply_scalar_binary(const T1 &x, const T2 &y, const F &f)
Base template function for vectorization of binary scalar functions, defined by applying a functor to each pair of corresponding scalar elements of the two inputs.
auto finite_diff(const F &func, const TArgs &... args)
Construct an fvar<T> where the tangent is calculated by finite-differencing.
static void grad()
Compute the gradient for all variables starting from the end of the AD tape.
Definition grad.hpp:26
The lgamma implementation in stan-math is based on either the reentrant-safe lgamma_r implementation (where the platform provides it) or an alternative library implementation — sentence truncated in extraction; confirm against the upstream lgamma documentation.