Automatic Differentiation
 
binomial_logit_glm_lpmf.hpp
#ifndef STAN_MATH_PRIM_PROB_BINOMIAL_LOGIT_GLM_LPMF_HPP
#define STAN_MATH_PRIM_PROB_BINOMIAL_LOGIT_GLM_LPMF_HPP

#include &lt;stan/math/prim/meta.hpp&gt;
#include &lt;stan/math/prim/err.hpp&gt;
#include &lt;stan/math/prim/fun/binomial_coefficient_log.hpp&gt;
#include &lt;stan/math/prim/fun/exp.hpp&gt;
#include &lt;stan/math/prim/fun/log_inv_logit.hpp&gt;
#include &lt;stan/math/prim/fun/log1m_inv_logit.hpp&gt;
#include &lt;stan/math/prim/fun/max_size.hpp&gt;
#include &lt;stan/math/prim/fun/size_zero.hpp&gt;
#include &lt;stan/math/prim/fun/sum.hpp&gt;
#include &lt;stan/math/prim/fun/to_ref.hpp&gt;
#include &lt;stan/math/prim/fun/value_of.hpp&gt;
#include &lt;stan/math/prim/functor/partials_propagator.hpp&gt;
#include &lt;cmath&gt;

namespace stan {
namespace math {

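/**
 * Returns the log PMF of the Generalized Linear Model (GLM) with binomial
 * distribution and logit link function; the linear predictor is
 * alpha + x * beta. The calculation is vectorized over the successes n, the
 * population sizes N, the rows of x, and the intercepts alpha. When propto
 * is true, terms that are constant with respect to the autodiff arguments
 * are dropped.
 */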
template &lt;bool propto, typename T_n, typename T_N, typename T_x,
          typename T_alpha, typename T_beta, require_matrix_t&lt;T_x&gt;* = nullptr&gt;
return_type_t&lt;T_x, T_alpha, T_beta&gt; binomial_logit_glm_lpmf(
    const T_n&amp; n, const T_N&amp; N, const T_x&amp; x, const T_alpha&amp; alpha,
    const T_beta&amp; beta) {
  constexpr int T_x_rows = T_x::RowsAtCompileTime;
  using T_partials_return = partials_return_t&lt;T_x, T_alpha, T_beta&gt;;
  using T_n_ref = ref_type_if_t&lt;is_autodiff_v&lt;T_n&gt;, T_n&gt;;
  using T_N_ref = ref_type_if_t&lt;is_autodiff_v&lt;T_N&gt;, T_N&gt;;
  using T_x_ref = ref_type_if_t&lt;is_autodiff_v&lt;T_x&gt;, T_x&gt;;
  using T_alpha_ref = ref_type_if_t&lt;is_autodiff_v&lt;T_alpha&gt;, T_alpha&gt;;
  using T_beta_ref = ref_type_if_t&lt;is_autodiff_v&lt;T_beta&gt;, T_beta&gt;;

  T_n_ref n_ref = n;
  T_N_ref N_ref = N;
  T_x_ref x_ref = x;
  T_alpha_ref alpha_ref = alpha;
  T_beta_ref beta_ref = beta;

  if (size_zero(n, N, alpha, beta, x)) {
    return 0;
  }

  if (!include_summand&lt;propto, T_x, T_alpha, T_beta&gt;::value) {
    return 0;
  }

  const size_t N_instances = max_size(n, N, x.col(0), alpha);
  const size_t N_attributes = x.cols();

  static const char* function = "binomial_logit_glm_lpmf";
  check_consistent_sizes(function, "Successes variable", n,
                         "Population size parameter", N);
  check_consistent_size(function, "Successes variable", n, N_instances);
  check_consistent_size(function, "Population size parameter", N, N_instances);
  check_consistent_size(function, "Weight vector", beta, N_attributes);
  check_consistent_size(function, "Vector of intercepts", alpha, N_instances);

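  // Strip autodiff adjoints and reshape the inputs into column arrays/vectors
  // so the remaining work can be done with plain Eigen array arithmetic.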
  auto&amp;&amp; n_val = as_value_column_array_or_scalar(n_ref);
  auto&amp;&amp; N_val = as_value_column_array_or_scalar(N_ref);

  check_bounded(function, "Successes variable", n_val, 0, N_val);
  check_nonnegative(function, "Population size parameter", N_val);

  auto&amp;&amp; alpha_val = as_value_column_array_or_scalar(alpha_ref);
  auto&amp;&amp; beta_val = as_value_column_vector_or_scalar(beta_ref);
  auto&amp;&amp; x_val = value_of(x_ref);
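  // Linear predictor theta = alpha + x * beta; a single compile-time row of x
  // is broadcast across all N_instances rows.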
  Eigen::Array&lt;T_partials_return, -1, 1&gt; theta(N_instances);
  if constexpr (T_x_rows == 1) {
    theta = (x_val * beta_val)(0, 0) + alpha_val;
  } else {
    theta = (x_val * beta_val).array() + alpha_val;
  }

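  // Kernel of the log PMF:
  //   logp = sum_i [ n_i * log(inv_logit(theta_i))
  //                  + (N_i - n_i) * log(1 - inv_logit(theta_i)) ].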
  constexpr bool gradients_calc = is_any_autodiff_v&lt;T_beta, T_x, T_alpha&gt;;
  auto&amp;&amp; log_inv_logit_theta = to_ref_if&lt;gradients_calc&gt;(log_inv_logit(theta));

  T_partials_return logp = sum(n_val * log_inv_logit_theta
                               + (N_val - n_val) * log1m_inv_logit(theta));

  using std::isfinite;
  if (!isfinite(logp)) {
    check_finite(function, "Weight vector", beta);
    check_finite(function, "Intercept", alpha);
    check_finite(function, "Matrix of independent variables", x);
  }

  if (include_summand&lt;propto&gt;::value) {
    size_t broadcast_n = max_size(N, n) == N_instances ? 1 : N_instances;
    logp += sum(binomial_coefficient_log(N_val, n_val)) * broadcast_n;
  }

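  // Partial derivatives with respect to x, alpha, and beta all share the
  // common factor d logp / d theta = n - N * inv_logit(theta).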
  auto ops_partials = make_partials_propagator(x_ref, alpha_ref, beta_ref);
  if constexpr (gradients_calc) {
    Eigen::Matrix&lt;T_partials_return, -1, 1&gt; theta_derivative
        = n_val - N_val * exp(log_inv_logit_theta);

    if constexpr (is_autodiff_v&lt;T_beta&gt;) {
      if constexpr (T_x_rows == 1) {
        edge&lt;2&gt;(ops_partials).partials_ = theta_derivative.sum() * x_val;
      } else {
        partials&lt;2&gt;(ops_partials) = x_val.transpose() * theta_derivative;
      }
    }

    if constexpr (is_autodiff_v&lt;T_x&gt;) {
      if constexpr (T_x_rows == 1) {
        edge&lt;0&gt;(ops_partials).partials_ = beta_val * theta_derivative.sum();
      } else {
        edge&lt;0&gt;(ops_partials).partials_
            = (beta_val * theta_derivative.transpose()).transpose();
      }
    }
    if constexpr (is_autodiff_v&lt;T_alpha&gt;) {
      partials&lt;1&gt;(ops_partials) = theta_derivative;
    }
  }
  return ops_partials.build(logp);
}

template &lt;typename T_n, typename T_N, typename T_x, typename T_alpha,
          typename T_beta&gt;
inline return_type_t&lt;T_x, T_alpha, T_beta&gt; binomial_logit_glm_lpmf(
    const T_n&amp; n, const T_N&amp; N, const T_x&amp; x, const T_alpha&amp; alpha,
    const T_beta&amp; beta) {
  return binomial_logit_glm_lpmf&lt;false&gt;(n, N, x, alpha, beta);
}
}  // namespace math
}  // namespace stan
#endif
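
For orientation, here is a minimal, hypothetical driver that exercises this header. The data values and variable names are illustrative, and the program assumes the Stan Math headers (stan/math.hpp) are on the include path; it is a sketch of typical usage, not part of the library.

#include &lt;stan/math.hpp&gt;
#include &lt;Eigen/Dense&gt;
#include &lt;iostream&gt;
#include &lt;vector&gt;

int main() {
  // Three observations with two predictors each (made-up values).
  std::vector&lt;int&gt; n{1, 3, 2};   // successes per instance
  std::vector&lt;int&gt; N{5, 5, 5};   // trials per instance
  Eigen::MatrixXd x(3, 2);
  x &lt;&lt; 0.5, -1.0,
       1.2,  0.3,
      -0.7,  2.1;
  double alpha = 0.2;            // shared intercept
  Eigen::VectorXd beta(2);
  beta &lt;&lt; 1.5, -0.5;

  // Plain double arguments: evaluates the full log PMF.
  double lp = stan::math::binomial_logit_glm_lpmf(n, N, x, alpha, beta);
  std::cout &lt;&lt; "log PMF: " &lt;&lt; lp &lt;&lt; "\n";

  // var arguments: the same call participates in reverse-mode autodiff,
  // with the analytic partials above fed to the partials propagator.
  Eigen::Matrix&lt;stan::math::var, Eigen::Dynamic, 1&gt; beta_v(2);
  beta_v &lt;&lt; 1.5, -0.5;
  stan::math::var lp_v
      = stan::math::binomial_logit_glm_lpmf(n, N, x, alpha, beta_v);
  lp_v.grad();
  std::cout &lt;&lt; "d logp / d beta: " &lt;&lt; beta_v(0).adj() &lt;&lt; ", "
            &lt;&lt; beta_v(1).adj() &lt;&lt; "\n";
  stan::math::recover_memory();
  return 0;
}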