Automatic Differentiation
 
Loading...
Searching...
No Matches
hypergeometric_pFq.hpp
Go to the documentation of this file.
1#ifndef STAN_MATH_REV_FUN_HYPERGEOMETRIC_PFQ_HPP
2#define STAN_MATH_REV_FUN_HYPERGEOMETRIC_PFQ_HPP
3
8
9namespace stan {
10namespace math {
11
/**
 * Returns the generalized hypergeometric (pFq) function of the matrix
 * arguments `a` and `b` and scalar `z`, as a reverse-mode autodiff `var`,
 * accumulating adjoints for every argument that contains vars.
 *
 * The boolean template flags `grad_a` / `grad_b` / `grad_z` default to
 * whether the corresponding argument is non-constant; `grad_pFq` is
 * instantiated with these flags so gradients are only computed for the
 * arguments that actually need them.
 *
 * @tparam Ta matrix type of the first set of hypergeometric arguments
 * @tparam Tb matrix type of the second set of hypergeometric arguments
 * @tparam Tz scalar type of the z argument
 * @param a first argument vector
 * @param b second argument vector
 * @param z scalar argument
 * @return pFq(a; b; z) as a var wired into the reverse pass
 */
template <typename Ta, typename Tb, typename Tz,
 bool grad_a = !is_constant<Ta>::value,
 bool grad_b = !is_constant<Tb>::value,
 bool grad_z = !is_constant<Tz>::value,
 require_all_matrix_t<Ta, Tb>* = nullptr,
 require_return_type_t<is_var, Ta, Tb, Tz>* = nullptr>
inline var hypergeometric_pFq(const Ta& a, const Tb& b, const Tz& z) {
 // Copy the inputs onto the AD arena so the callback below can safely
 // reference them during the reverse pass.
 arena_t<Ta> arena_a = a;
 arena_t<Tb> arena_b = b;
 // Forward pass: evaluate pFq on the plain values. The result is also
 // captured by the callback because grad_pFq reuses it when computing
 // the gradients.
 auto pfq_val = hypergeometric_pFq(a.val(), b.val(), value_of(z));
 // Return a var whose reverse-pass callback pushes vi.adj() times the
 // partials of pFq into each non-constant argument's adjoints.
 return make_callback_var(
 pfq_val, [arena_a, arena_b, z, pfq_val](auto& vi) mutable {
 // Tuple of (d/da, d/db, d/dz); entries for disabled flags are unused.
 auto grad_tuple = grad_pFq<grad_a, grad_b, grad_z>(
 pfq_val, arena_a.val(), arena_b.val(), value_of(z));
 if constexpr (grad_a) {
 // forward_as is safe here: this branch is only instantiated when
 // Ta actually contains vars, so the cast to the var form is exact.
 forward_as<promote_scalar_t<var, Ta>>(arena_a).adj()
 += vi.adj() * std::get<0>(grad_tuple);
 }
 if constexpr (grad_b) {
 forward_as<promote_scalar_t<var, Tb>>(arena_b).adj()
 += vi.adj() * std::get<1>(grad_tuple);
 }
 if constexpr (grad_z) {
 forward_as<promote_scalar_t<var, Tz>>(z).adj()
 += vi.adj() * std::get<2>(grad_tuple);
 }
 });
}
52} // namespace math
53} // namespace stan
54#endif
var_value< plain_type_t< T > > make_callback_var(T &&value, F &&functor)
Creates a new var initialized with a callback_vari with a given value and reverse-pass callback functor.
T value_of(const fvar< T > &v)
Return the value of the specified variable.
Definition value_of.hpp:18
FvarT hypergeometric_pFq(const Ta &a, const Tb &b, const Tz &z)
Returns the generalized hypergeometric (pFq) function applied to the input arguments.
typename internal::arena_type_impl< std::decay_t< T > >::type arena_t
Determines a type that can be used in place of T that does any dynamic allocations on the AD stack.
The lgamma implementation in stan-math is based on either the reentrant safe lgamma_r implementation ...