1#ifndef STAN_MATH_PRIM_FUN_COV_DOT_PROD_HPP
2#define STAN_MATH_PRIM_FUN_COV_DOT_PROD_HPP
36template <
typename T_x,
typename T_sigma>
37inline Eigen::Matrix<return_type_t<T_x, T_sigma>, Eigen::Dynamic,
40 const T_sigma &sigma) {
45 size_t x_size = x.size();
46 for (
size_t i = 0; i < x_size; ++i) {
51 Eigen::Matrix<return_type_t<T_x, T_sigma>, Eigen::Dynamic, Eigen::Dynamic>
57 T_sigma sigma_sq =
square(sigma);
58 size_t block_size = 10;
60 for (
size_t jb = 0; jb < x_size; jb += block_size) {
61 for (
size_t ib = jb; ib < x_size; ib += block_size) {
62 size_t j_end = std::min(x_size, jb + block_size);
63 for (
size_t j = jb; j < j_end; ++j) {
64 cov.coeffRef(j, j) = sigma_sq +
dot_self(x[j]);
65 size_t i_end = std::min(x_size, ib + block_size);
66 for (
size_t i = std::max(ib, j + 1); i < i_end; ++i) {
67 cov.coeffRef(j, i) = cov.coeffRef(i, j)
73 cov.coeffRef(x_size - 1, x_size - 1) = sigma_sq +
dot_self(x[x_size - 1]);
99template <
typename T_x,
typename T_sigma>
100inline Eigen::Matrix<return_type_t<T_x, T_sigma>, Eigen::Dynamic,
106 size_t x_size = x.size();
109 Eigen::Matrix<return_type_t<T_x, T_sigma>, Eigen::Dynamic, Eigen::Dynamic>
115 T_sigma sigma_sq =
square(sigma);
116 size_t block_size = 10;
118 for (
size_t jb = 0; jb < x_size; jb += block_size) {
119 for (
size_t ib = jb; ib < x_size; ib += block_size) {
120 size_t j_end = std::min(x_size, jb + block_size);
121 for (
size_t j = jb; j < j_end; ++j) {
122 cov.coeffRef(j, j) = sigma_sq + x[j] * x[j];
123 size_t i_end = std::min(x_size, ib + block_size);
124 for (
size_t i = std::max(ib, j + 1); i < i_end; ++i) {
125 cov.coeffRef(j, i) = cov.coeffRef(i, j) = sigma_sq + x[i] * x[j];
130 cov(x_size - 1, x_size - 1) = sigma_sq + x[x_size - 1] * x[x_size - 1];
156template <
typename T_x1,
typename T_x2,
typename T_sigma>
157inline Eigen::Matrix<return_type_t<T_x1, T_x2, T_sigma>, Eigen::Dynamic,
160 const std::vector<Eigen::Matrix<T_x2, Eigen::Dynamic, 1>> &x2,
161 const T_sigma &sigma) {
165 size_t x1_size = x1.size();
166 size_t x2_size = x2.size();
167 for (
size_t i = 0; i < x1_size; ++i) {
170 for (
size_t i = 0; i < x2_size; ++i) {
173 Eigen::Matrix<return_type_t<T_x1, T_x2, T_sigma>, Eigen::Dynamic,
175 cov(x1_size, x2_size);
177 if (x1_size == 0 || x2_size == 0) {
181 T_sigma sigma_sq =
square(sigma);
182 size_t block_size = 10;
184 for (
size_t ib = 0; ib < x1_size; ib += block_size) {
185 for (
size_t jb = 0; jb < x2_size; jb += block_size) {
186 size_t j_end = std::min(x2_size, jb + block_size);
187 for (
size_t j = jb; j < j_end; ++j) {
188 size_t i_end = std::min(x1_size, ib + block_size);
189 for (
size_t i = ib; i < i_end; ++i) {
220template <
typename T_x1,
typename T_x2,
typename T_sigma>
221inline Eigen::Matrix<return_type_t<T_x1, T_x2, T_sigma>, Eigen::Dynamic,
224 const T_sigma &sigma) {
228 size_t x1_size = x1.size();
229 size_t x2_size = x2.size();
233 Eigen::Matrix<return_type_t<T_x1, T_x2, T_sigma>, Eigen::Dynamic,
235 cov(x1_size, x2_size);
237 if (x1_size == 0 || x2_size == 0) {
241 T_sigma sigma_sq =
square(sigma);
243 for (
size_t i = 0; i < x1_size; ++i) {
244 for (
size_t j = 0; j < x2_size; ++j) {
245 cov(i, j) = sigma_sq + x1[i] * x2[j];
auto gp_dot_prod_cov(const T_x &x, const T_sigma sigma)
Dot product kernel on the GPU.
void check_nonnegative(const char *function, const char *name, const T_y &y)
Check if y is non-negative.
void check_finite(const char *function, const char *name, const T_y &y)
Check that all values in y are finite; throws if any value is not.
void check_not_nan(const char *function, const char *name, const T_y &y)
Check if y is not NaN.
auto dot_self(const T &a)
Returns squared norm of a vector or matrix.
auto dot_product(const T_a &a, const T_b &b)
Returns the dot product of the specified vectors.
fvar< T > square(const fvar< T > &x)
The lgamma implementation in stan-math is based on either the reentrant safe lgamma_r implementation ...