Automatic Differentiation
 
log_softmax.hpp
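This header provides the forward-mode automatic differentiation overload of log_softmax. The tangent accumulation in the inner loop below is an application of the log-softmax Jacobian,

\[
\frac{\partial}{\partial \alpha_m}\,\bigl(\log \operatorname{softmax}(\alpha)\bigr)_k
  = \delta_{km} - \operatorname{softmax}(\alpha)_m,
\]

so each output tangent is the vector of input tangents contracted against this Jacobian.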
#ifndef STAN_MATH_FWD_FUN_LOG_SOFTMAX_HPP
#define STAN_MATH_FWD_FUN_LOG_SOFTMAX_HPP

#include <stan/math/fwd/core.hpp>
#include <stan/math/prim/fun/Eigen.hpp>
#include <stan/math/prim/fun/log_softmax.hpp>
#include <stan/math/prim/fun/softmax.hpp>
#include <stan/math/prim/fun/to_ref.hpp>
#include <stan/math/prim/functor/apply_vector_unary.hpp>
#include <stan/math/prim/meta.hpp>

namespace stan {
namespace math {

/**
 * Return the log softmax of the specified vector or container of
 * vectors.
 *
 * @tparam T type of the vector or container of vectors
 * @param[in] x vector or container of vectors to transform
 * @return log softmax of the input
 */
template <typename T, require_vector_st<is_fvar, T>* = nullptr>
inline auto log_softmax(T&& x) {
  return apply_vector_unary<T>::apply(std::forward<T>(x), [](auto&& alpha) {
    using T_alpha = decltype(alpha);
    using T_fvar = value_type_t<T_alpha>;
    using T_fvar_inner = typename T_fvar::Scalar;

    // Evaluate the (possibly lazy) input expression once.
    auto&& alpha_ref = to_ref(std::forward<decltype(alpha)>(alpha));
    Eigen::Matrix<T_fvar_inner, -1, 1> alpha_t = alpha_ref.val();
    Eigen::Matrix<T_fvar_inner, -1, 1> softmax_alpha_t = softmax(alpha_t);

    // Values come from the primitive log_softmax; tangents are
    // accumulated below.
    Eigen::Matrix<T_fvar, -1, 1> log_softmax_alpha(alpha_ref.size());
    log_softmax_alpha.val() = log_softmax(alpha_t);
    log_softmax_alpha.d().setZero();

    // Contract the input tangents against the Jacobian
    // d(log_softmax(alpha))_k / d(alpha_m) = delta_{km} - softmax(alpha)_m.
    for (int m = 0; m < alpha_ref.size(); ++m) {
      T_fvar_inner negative_alpha_m_d_times_softmax_alpha_t_m
          = -alpha_ref.coeff(m).d_ * softmax_alpha_t(m);
      for (int k = 0; k < alpha_ref.size(); ++k) {
        if (m == k) {
          log_softmax_alpha(k).d_
              += alpha_ref.coeff(m).d_
                 + negative_alpha_m_d_times_softmax_alpha_t_m;
        } else {
          log_softmax_alpha(k).d_
              += negative_alpha_m_d_times_softmax_alpha_t_m;
        }
      }
    }

    return log_softmax_alpha;
  });
}

}  // namespace math
}  // namespace stan
#endif
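As a usage sketch (not part of the header itself), the following assumes the forward-mode umbrella header <stan/math/fwd.hpp> and seeds a unit tangent on alpha(0); by the Jacobian identity above, each output tangent should equal delta_{k0} - softmax(alpha)_0.

#include <stan/math/fwd.hpp>
#include <iostream>

int main() {
  using stan::math::fvar;

  // Three-component input with a unit tangent seeded on alpha(0).
  Eigen::Matrix<fvar<double>, Eigen::Dynamic, 1> alpha(3);
  alpha << 1.0, 2.0, 3.0;
  alpha(0).d_ = 1.0;

  Eigen::Matrix<fvar<double>, Eigen::Dynamic, 1> ls
      = stan::math::log_softmax(alpha);
  for (int k = 0; k < ls.size(); ++k) {
    // Expected tangent: (k == 0 ? 1 : 0) - softmax(alpha)(0),
    // i.e. roughly 0.910 for k == 0 and -0.090 otherwise.
    std::cout << "log_softmax[" << k << "] val = " << ls(k).val_
              << ", tangent = " << ls(k).d_ << "\n";
  }
}

Note the design choice visible in the header: the softmax of the values is computed once up front, so the double loop only performs scalar multiply-accumulates rather than recomputing any exponentials.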
Referenced symbols

value_type_t: typename value_type<T>::type — helper alias for accessing the underlying type of a container.
softmax: auto softmax(const ColVec& alpha) — defined in softmax.hpp:15.
log_softmax: auto log_softmax(T&& x) — returns the log softmax of the specified vector or container of vectors.
to_ref: ref_type_t<T&&> to_ref(T&& a) — evaluates expensive Eigen expressions; defined in to_ref.hpp:18.