Automatic Differentiation
 
Loading...
Searching...
No Matches
gp_dot_prod_cov.hpp
Go to the documentation of this file.
1#ifndef STAN_MATH_PRIM_FUN_COV_DOT_PROD_HPP
2#define STAN_MATH_PRIM_FUN_COV_DOT_PROD_HPP
3
10#include <vector>
11
12namespace stan {
13namespace math {
14
36template <typename T_x, typename T_sigma>
37Eigen::Matrix<return_type_t<T_x, T_sigma>, Eigen::Dynamic, Eigen::Dynamic>
38gp_dot_prod_cov(const std::vector<Eigen::Matrix<T_x, Eigen::Dynamic, 1>> &x,
39 const T_sigma &sigma) {
40 check_not_nan("gp_dot_prod_cov", "sigma", sigma);
41 check_nonnegative("gp_dot_prod_cov", "sigma", sigma);
42 check_finite("gp_dot_prod_cov", "sigma", sigma);
43
44 size_t x_size = x.size();
45 for (size_t i = 0; i < x_size; ++i) {
46 check_not_nan("gp_dot_prod_cov", "x", x[i]);
47 check_finite("gp_dot_prod_cov", "x", x[i]);
48 }
49
50 Eigen::Matrix<return_type_t<T_x, T_sigma>, Eigen::Dynamic, Eigen::Dynamic>
51 cov(x_size, x_size);
52 if (x_size == 0) {
53 return cov;
54 }
55
56 T_sigma sigma_sq = square(sigma);
57 size_t block_size = 10;
58
59 for (size_t jb = 0; jb < x_size; jb += block_size) {
60 for (size_t ib = jb; ib < x_size; ib += block_size) {
61 size_t j_end = std::min(x_size, jb + block_size);
62 for (size_t j = jb; j < j_end; ++j) {
63 cov.coeffRef(j, j) = sigma_sq + dot_self(x[j]);
64 size_t i_end = std::min(x_size, ib + block_size);
65 for (size_t i = std::max(ib, j + 1); i < i_end; ++i) {
66 cov.coeffRef(j, i) = cov.coeffRef(i, j)
67 = sigma_sq + dot_product(x[i], x[j]);
68 }
69 }
70 }
71 }
72 cov.coeffRef(x_size - 1, x_size - 1) = sigma_sq + dot_self(x[x_size - 1]);
73 return cov;
74}
75
98template <typename T_x, typename T_sigma>
99Eigen::Matrix<return_type_t<T_x, T_sigma>, Eigen::Dynamic, Eigen::Dynamic>
100gp_dot_prod_cov(const std::vector<T_x> &x, const T_sigma &sigma) {
101 check_nonnegative("gp_dot_prod_cov", "sigma", sigma);
102 check_finite("gp_dot_prod_cov", "sigma", sigma);
103
104 size_t x_size = x.size();
105 check_finite("gp_dot_prod_cov", "x", x);
106
107 Eigen::Matrix<return_type_t<T_x, T_sigma>, Eigen::Dynamic, Eigen::Dynamic>
108 cov(x_size, x_size);
109 if (x_size == 0) {
110 return cov;
111 }
112
113 T_sigma sigma_sq = square(sigma);
114 size_t block_size = 10;
115
116 for (size_t jb = 0; jb < x_size; jb += block_size) {
117 for (size_t ib = jb; ib < x_size; ib += block_size) {
118 size_t j_end = std::min(x_size, jb + block_size);
119 for (size_t j = jb; j < j_end; ++j) {
120 cov.coeffRef(j, j) = sigma_sq + x[j] * x[j];
121 size_t i_end = std::min(x_size, ib + block_size);
122 for (size_t i = std::max(ib, j + 1); i < i_end; ++i) {
123 cov.coeffRef(j, i) = cov.coeffRef(i, j) = sigma_sq + x[i] * x[j];
124 }
125 }
126 }
127 }
128 cov(x_size - 1, x_size - 1) = sigma_sq + x[x_size - 1] * x[x_size - 1];
129 return cov;
130}
131
154template <typename T_x1, typename T_x2, typename T_sigma>
155Eigen::Matrix<return_type_t<T_x1, T_x2, T_sigma>, Eigen::Dynamic,
156 Eigen::Dynamic>
157gp_dot_prod_cov(const std::vector<Eigen::Matrix<T_x1, Eigen::Dynamic, 1>> &x1,
158 const std::vector<Eigen::Matrix<T_x2, Eigen::Dynamic, 1>> &x2,
159 const T_sigma &sigma) {
160 check_nonnegative("gp_dot_prod_cov", "sigma", sigma);
161 check_finite("gp_dot_prod_cov", "sigma", sigma);
162
163 size_t x1_size = x1.size();
164 size_t x2_size = x2.size();
165 for (size_t i = 0; i < x1_size; ++i) {
166 check_finite("gp_dot_prod_cov", "x1", x1[i]);
167 }
168 for (size_t i = 0; i < x2_size; ++i) {
169 check_finite("gp_dot_prod_cov", "x2", x2[i]);
170 }
171 Eigen::Matrix<return_type_t<T_x1, T_x2, T_sigma>, Eigen::Dynamic,
172 Eigen::Dynamic>
173 cov(x1_size, x2_size);
174
175 if (x1_size == 0 || x2_size == 0) {
176 return cov;
177 }
178
179 T_sigma sigma_sq = square(sigma);
180 size_t block_size = 10;
181
182 for (size_t ib = 0; ib < x1_size; ib += block_size) {
183 for (size_t jb = 0; jb < x2_size; jb += block_size) {
184 size_t j_end = std::min(x2_size, jb + block_size);
185 for (size_t j = jb; j < j_end; ++j) {
186 size_t i_end = std::min(x1_size, ib + block_size);
187 for (size_t i = ib; i < i_end; ++i) {
188 cov(i, j) = sigma_sq + dot_product(x1[i], x2[j]);
189 }
190 }
191 }
192 }
193 return cov;
194}
195
218template <typename T_x1, typename T_x2, typename T_sigma>
219Eigen::Matrix<return_type_t<T_x1, T_x2, T_sigma>, Eigen::Dynamic,
220 Eigen::Dynamic>
221gp_dot_prod_cov(const std::vector<T_x1> &x1, const std::vector<T_x2> &x2,
222 const T_sigma &sigma) {
223 check_nonnegative("gp_dot_prod_cov", "sigma", sigma);
224 check_finite("gp_dot_prod_cov", "sigma", sigma);
225
226 size_t x1_size = x1.size();
227 size_t x2_size = x2.size();
228 check_finite("gp_dot_prod_cov", "x1", x1);
229 check_finite("gp_dot_prod_cov", "x2", x2);
230
231 Eigen::Matrix<return_type_t<T_x1, T_x2, T_sigma>, Eigen::Dynamic,
232 Eigen::Dynamic>
233 cov(x1_size, x2_size);
234
235 if (x1_size == 0 || x2_size == 0) {
236 return cov;
237 }
238
239 T_sigma sigma_sq = square(sigma);
240
241 for (size_t i = 0; i < x1_size; ++i) {
242 for (size_t j = 0; j < x2_size; ++j) {
243 cov(i, j) = sigma_sq + x1[i] * x2[j];
244 }
245 }
246 return cov;
247}
248
249} // namespace math
250} // namespace stan
251
252#endif
auto gp_dot_prod_cov(const T_x &x, const T_sigma sigma)
Dot product kernel on the GPU.
void check_nonnegative(const char *function, const char *name, const T_y &y)
Check if y is non-negative.
void check_finite(const char *function, const char *name, const T_y &y)
Check that all values in y are finite (throws a domain error otherwise).
void check_not_nan(const char *function, const char *name, const T_y &y)
Check if y is not NaN.
auto dot_self(const T &a)
Returns squared norm of a vector or matrix.
Definition dot_self.hpp:21
auto dot_product(const T_a &a, const T_b &b)
Returns the dot product of the specified vectors.
fvar< T > square(const fvar< T > &x)
Definition square.hpp:12
The lgamma implementation in stan-math is based on either the reentrant safe lgamma_r implementation ...