gp_dot_prod_cov.hpp
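This is the source listing for stan::math::gp_dot_prod_cov, the dot product (linear) covariance function for Gaussian processes. All four overloads below fill a covariance matrix with entries

    k(x_i, x_j) = sigma^2 + x_i . x_j

where the inputs are either column vectors (the dot product is dot_product(x_i, x_j)) or scalars (the product x_i * x_j), and sigma is a finite, non-negative magnitude parameter. The (x, sigma) overloads return the square, symmetric covariance of a single input set; the (x1, x2, sigma) overloads return the rectangular cross covariance between two input sets.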
#ifndef STAN_MATH_PRIM_FUN_COV_DOT_PROD_HPP
#define STAN_MATH_PRIM_FUN_COV_DOT_PROD_HPP

// Headers for the facilities used below: return_type_t (meta), the check_*
// argument tests (err), and the Eigen / dot_product / dot_self / square helpers.
#include <stan/math/prim/meta.hpp>
#include <stan/math/prim/err.hpp>
#include <stan/math/prim/fun/Eigen.hpp>
#include <stan/math/prim/fun/dot_product.hpp>
#include <stan/math/prim/fun/dot_self.hpp>
#include <stan/math/prim/fun/square.hpp>
#include <algorithm>
#include <vector>

namespace stan {
namespace math {

// Dot product covariance matrix of one set of vector-valued inputs:
// cov(i, j) = sigma^2 + x[i] . x[j].
template <typename T_x, typename T_sigma>
inline Eigen::Matrix<return_type_t<T_x, T_sigma>, Eigen::Dynamic,
                     Eigen::Dynamic>
gp_dot_prod_cov(const std::vector<Eigen::Matrix<T_x, Eigen::Dynamic, 1>> &x,
                const T_sigma &sigma) {
  check_not_nan("gp_dot_prod_cov", "sigma", sigma);
  check_nonnegative("gp_dot_prod_cov", "sigma", sigma);
  check_finite("gp_dot_prod_cov", "sigma", sigma);

  size_t x_size = x.size();
  for (size_t i = 0; i < x_size; ++i) {
    check_not_nan("gp_dot_prod_cov", "x", x[i]);
    check_finite("gp_dot_prod_cov", "x", x[i]);
  }

  Eigen::Matrix<return_type_t<T_x, T_sigma>, Eigen::Dynamic, Eigen::Dynamic>
      cov(x_size, x_size);
  if (x_size == 0) {
    return cov;
  }

  T_sigma sigma_sq = square(sigma);
  size_t block_size = 10;  // traverse the symmetric matrix in 10 x 10 blocks

  for (size_t jb = 0; jb < x_size; jb += block_size) {
    for (size_t ib = jb; ib < x_size; ib += block_size) {
      size_t j_end = std::min(x_size, jb + block_size);
      for (size_t j = jb; j < j_end; ++j) {
        cov.coeffRef(j, j) = sigma_sq + dot_self(x[j]);
        size_t i_end = std::min(x_size, ib + block_size);
        for (size_t i = std::max(ib, j + 1); i < i_end; ++i) {
          // fill the lower triangle and mirror it into the upper triangle
          cov.coeffRef(j, i) = cov.coeffRef(i, j)
              = sigma_sq + dot_product(x[i], x[j]);
        }
      }
    }
  }
  cov.coeffRef(x_size - 1, x_size - 1) = sigma_sq + dot_self(x[x_size - 1]);
  return cov;
}

// Same kernel for scalar inputs: cov(i, j) = sigma^2 + x[i] * x[j].
template <typename T_x, typename T_sigma>
inline Eigen::Matrix<return_type_t<T_x, T_sigma>, Eigen::Dynamic,
                     Eigen::Dynamic>
gp_dot_prod_cov(const std::vector<T_x> &x, const T_sigma &sigma) {
  check_nonnegative("gp_dot_prod_cov", "sigma", sigma);
  check_finite("gp_dot_prod_cov", "sigma", sigma);

  size_t x_size = x.size();
  check_finite("gp_dot_prod_cov", "x", x);

  Eigen::Matrix<return_type_t<T_x, T_sigma>, Eigen::Dynamic, Eigen::Dynamic>
      cov(x_size, x_size);
  if (x_size == 0) {
    return cov;
  }

  T_sigma sigma_sq = square(sigma);
  size_t block_size = 10;

  for (size_t jb = 0; jb < x_size; jb += block_size) {
    for (size_t ib = jb; ib < x_size; ib += block_size) {
      size_t j_end = std::min(x_size, jb + block_size);
      for (size_t j = jb; j < j_end; ++j) {
        cov.coeffRef(j, j) = sigma_sq + x[j] * x[j];
        size_t i_end = std::min(x_size, ib + block_size);
        for (size_t i = std::max(ib, j + 1); i < i_end; ++i) {
          cov.coeffRef(j, i) = cov.coeffRef(i, j) = sigma_sq + x[i] * x[j];
        }
      }
    }
  }
  cov(x_size - 1, x_size - 1) = sigma_sq + x[x_size - 1] * x[x_size - 1];
  return cov;
}

// Cross covariance matrix between two sets of vector-valued inputs:
// cov(i, j) = sigma^2 + x1[i] . x2[j].
template <typename T_x1, typename T_x2, typename T_sigma>
inline Eigen::Matrix<return_type_t<T_x1, T_x2, T_sigma>, Eigen::Dynamic,
                     Eigen::Dynamic>
gp_dot_prod_cov(const std::vector<Eigen::Matrix<T_x1, Eigen::Dynamic, 1>> &x1,
                const std::vector<Eigen::Matrix<T_x2, Eigen::Dynamic, 1>> &x2,
                const T_sigma &sigma) {
  check_nonnegative("gp_dot_prod_cov", "sigma", sigma);
  check_finite("gp_dot_prod_cov", "sigma", sigma);

  size_t x1_size = x1.size();
  size_t x2_size = x2.size();
  for (size_t i = 0; i < x1_size; ++i) {
    check_finite("gp_dot_prod_cov", "x1", x1[i]);
  }
  for (size_t i = 0; i < x2_size; ++i) {
    check_finite("gp_dot_prod_cov", "x2", x2[i]);
  }
  Eigen::Matrix<return_type_t<T_x1, T_x2, T_sigma>, Eigen::Dynamic,
                Eigen::Dynamic>
      cov(x1_size, x2_size);

  if (x1_size == 0 || x2_size == 0) {
    return cov;
  }

  T_sigma sigma_sq = square(sigma);
  size_t block_size = 10;

  for (size_t ib = 0; ib < x1_size; ib += block_size) {
    for (size_t jb = 0; jb < x2_size; jb += block_size) {
      size_t j_end = std::min(x2_size, jb + block_size);
      for (size_t j = jb; j < j_end; ++j) {
        size_t i_end = std::min(x1_size, ib + block_size);
        for (size_t i = ib; i < i_end; ++i) {
          cov(i, j) = sigma_sq + dot_product(x1[i], x2[j]);
        }
      }
    }
  }
  return cov;
}

// Cross covariance matrix between two sets of scalar inputs:
// cov(i, j) = sigma^2 + x1[i] * x2[j].
template <typename T_x1, typename T_x2, typename T_sigma>
inline Eigen::Matrix<return_type_t<T_x1, T_x2, T_sigma>, Eigen::Dynamic,
                     Eigen::Dynamic>
gp_dot_prod_cov(const std::vector<T_x1> &x1, const std::vector<T_x2> &x2,
                const T_sigma &sigma) {
  check_nonnegative("gp_dot_prod_cov", "sigma", sigma);
  check_finite("gp_dot_prod_cov", "sigma", sigma);

  size_t x1_size = x1.size();
  size_t x2_size = x2.size();
  check_finite("gp_dot_prod_cov", "x1", x1);
  check_finite("gp_dot_prod_cov", "x2", x2);

  Eigen::Matrix<return_type_t<T_x1, T_x2, T_sigma>, Eigen::Dynamic,
                Eigen::Dynamic>
      cov(x1_size, x2_size);

  if (x1_size == 0 || x2_size == 0) {
    return cov;
  }

  T_sigma sigma_sq = square(sigma);

  for (size_t i = 0; i < x1_size; ++i) {
    for (size_t j = 0; j < x2_size; ++j) {
      cov(i, j) = sigma_sq + x1[i] * x2[j];
    }
  }
  return cov;
}

}  // namespace math
}  // namespace stan

#endif
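A minimal usage sketch for the overloads above. The umbrella header <stan/math.hpp> and the example inputs are assumptions for illustration; the calls themselves match the signatures defined in this file.

// Illustrative only; not part of gp_dot_prod_cov.hpp.
#include <stan/math.hpp>  // assumed entry point; brings in Eigen and the prim functions
#include <iostream>
#include <vector>

int main() {
  double sigma = 1.5;

  // Symmetric covariance from one set of vector-valued inputs (3 x 3).
  std::vector<Eigen::VectorXd> x(3, Eigen::VectorXd::Zero(2));
  x[0] << 1, 2;
  x[1] << 3, 4;
  x[2] << 5, 6;
  Eigen::MatrixXd K = stan::math::gp_dot_prod_cov(x, sigma);

  // Symmetric covariance from scalar inputs (3 x 3).
  std::vector<double> z{0.5, 1.0, 2.0};
  Eigen::MatrixXd Kz = stan::math::gp_dot_prod_cov(z, sigma);

  // Cross covariance between two input sets (3 x 2), e.g. observed vs. new points.
  std::vector<Eigen::VectorXd> x2(2, Eigen::VectorXd::Zero(2));
  x2[0] << 0, 1;
  x2[1] << 1, 0;
  Eigen::MatrixXd K12 = stan::math::gp_dot_prod_cov(x, x2, sigma);

  std::cout << K << "\n\n" << Kz << "\n\n" << K12 << "\n";
  return 0;
}

Because the return scalar is return_type_t<...>, the same calls also accept Stan's autodiff scalar types for sigma or the inputs, in which case the returned matrix carries the corresponding autodiff type.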
Functions referenced in this listing:

gp_dot_prod_cov(const T_x &x, const T_sigma sigma)
    Dot product kernel on the GPU (the OpenCL overload of this function).
check_nonnegative(const char *function, const char *name, const T_y &y)
    Check that y is non-negative.
check_finite(const char *function, const char *name, const T_y &y)
    Check that all values in y are finite.
check_not_nan(const char *function, const char *name, const T_y &y)
    Check that y is not NaN.
dot_self(const T &a)
    Returns the squared norm of a vector or matrix (dot_self.hpp).
dot_product(const T_a &a, const T_b &b)
    Returns the dot product of the specified vectors.
square(const fvar<T> &x)
    Forward-mode overload of square (square.hpp).
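Putting the helpers together: on the diagonal of the symmetric overloads, dot_self(x[j]) equals dot_product(x[j], x[j]), so

    cov(j, j) = sigma^2 + ||x[j]||^2

while every off-diagonal entry is sigma^2 plus the dot product of the two corresponding inputs.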