1#ifndef STAN_MATH_PRIM_FUN_COV_DOT_PROD_HPP
2#define STAN_MATH_PRIM_FUN_COV_DOT_PROD_HPP
36template <
typename T_x,
typename T_sigma>
37Eigen::Matrix<return_type_t<T_x, T_sigma>, Eigen::Dynamic, Eigen::Dynamic>
39 const T_sigma &sigma) {
44 size_t x_size = x.size();
45 for (
size_t i = 0; i < x_size; ++i) {
50 Eigen::Matrix<return_type_t<T_x, T_sigma>, Eigen::Dynamic, Eigen::Dynamic>
56 T_sigma sigma_sq =
square(sigma);
57 size_t block_size = 10;
59 for (
size_t jb = 0; jb < x_size; jb += block_size) {
60 for (
size_t ib = jb; ib < x_size; ib += block_size) {
61 size_t j_end = std::min(x_size, jb + block_size);
62 for (
size_t j = jb; j < j_end; ++j) {
63 cov.coeffRef(j, j) = sigma_sq +
dot_self(x[j]);
64 size_t i_end = std::min(x_size, ib + block_size);
65 for (
size_t i = std::max(ib, j + 1); i < i_end; ++i) {
66 cov.coeffRef(j, i) = cov.coeffRef(i, j)
72 cov.coeffRef(x_size - 1, x_size - 1) = sigma_sq +
dot_self(x[x_size - 1]);
98template <
typename T_x,
typename T_sigma>
99Eigen::Matrix<return_type_t<T_x, T_sigma>, Eigen::Dynamic, Eigen::Dynamic>
104 size_t x_size = x.size();
107 Eigen::Matrix<return_type_t<T_x, T_sigma>, Eigen::Dynamic, Eigen::Dynamic>
113 T_sigma sigma_sq =
square(sigma);
114 size_t block_size = 10;
116 for (
size_t jb = 0; jb < x_size; jb += block_size) {
117 for (
size_t ib = jb; ib < x_size; ib += block_size) {
118 size_t j_end = std::min(x_size, jb + block_size);
119 for (
size_t j = jb; j < j_end; ++j) {
120 cov.coeffRef(j, j) = sigma_sq + x[j] * x[j];
121 size_t i_end = std::min(x_size, ib + block_size);
122 for (
size_t i = std::max(ib, j + 1); i < i_end; ++i) {
123 cov.coeffRef(j, i) = cov.coeffRef(i, j) = sigma_sq + x[i] * x[j];
128 cov(x_size - 1, x_size - 1) = sigma_sq + x[x_size - 1] * x[x_size - 1];
154template <
typename T_x1,
typename T_x2,
typename T_sigma>
155Eigen::Matrix<return_type_t<T_x1, T_x2, T_sigma>, Eigen::Dynamic,
158 const std::vector<Eigen::Matrix<T_x2, Eigen::Dynamic, 1>> &x2,
159 const T_sigma &sigma) {
163 size_t x1_size = x1.size();
164 size_t x2_size = x2.size();
165 for (
size_t i = 0; i < x1_size; ++i) {
168 for (
size_t i = 0; i < x2_size; ++i) {
171 Eigen::Matrix<return_type_t<T_x1, T_x2, T_sigma>, Eigen::Dynamic,
173 cov(x1_size, x2_size);
175 if (x1_size == 0 || x2_size == 0) {
179 T_sigma sigma_sq =
square(sigma);
180 size_t block_size = 10;
182 for (
size_t ib = 0; ib < x1_size; ib += block_size) {
183 for (
size_t jb = 0; jb < x2_size; jb += block_size) {
184 size_t j_end = std::min(x2_size, jb + block_size);
185 for (
size_t j = jb; j < j_end; ++j) {
186 size_t i_end = std::min(x1_size, ib + block_size);
187 for (
size_t i = ib; i < i_end; ++i) {
218template <
typename T_x1,
typename T_x2,
typename T_sigma>
219Eigen::Matrix<return_type_t<T_x1, T_x2, T_sigma>, Eigen::Dynamic,
222 const T_sigma &sigma) {
226 size_t x1_size = x1.size();
227 size_t x2_size = x2.size();
231 Eigen::Matrix<return_type_t<T_x1, T_x2, T_sigma>, Eigen::Dynamic,
233 cov(x1_size, x2_size);
235 if (x1_size == 0 || x2_size == 0) {
239 T_sigma sigma_sq =
square(sigma);
241 for (
size_t i = 0; i < x1_size; ++i) {
242 for (
size_t j = 0; j < x2_size; ++j) {
243 cov(i, j) = sigma_sq + x1[i] * x2[j];
auto gp_dot_prod_cov(const T_x &x, const T_sigma sigma)
Dot product kernel on the GPU.
void check_nonnegative(const char *function, const char *name, const T_y &y)
Check if y is non-negative.
void check_finite(const char *function, const char *name, const T_y &y)
Check that all values in y are finite.
void check_not_nan(const char *function, const char *name, const T_y &y)
Check if y is not NaN.
auto dot_self(const T &a)
Returns squared norm of a vector or matrix.
auto dot_product(const T_a &a, const T_b &b)
Returns the dot product of the specified vectors.
fvar< T > square(const fvar< T > &x)
The lgamma implementation in stan-math is based on either the reentrant safe lgamma_r implementation ...