1 #ifndef STAN_MATH_MIX_MAT_FUNCTOR_GRAD_TR_MAT_TIMES_HESSIAN_HPP
2 #define STAN_MATH_MIX_MAT_FUNCTOR_GRAD_TR_MAT_TIMES_HESSIAN_HPP
21 const Eigen::Matrix<double, Dynamic, 1>& x,
22 const Eigen::Matrix<double, Dynamic, Dynamic>& M,
23 Eigen::Matrix<double, Dynamic, 1>& grad_tr_MH) {
27 grad_tr_MH.resize(x.size());
29 Matrix<var, Dynamic, 1> x_var(x.size());
30 for (
int i = 0; i < x.size(); ++i)
33 Matrix<fvar<var>, Dynamic, 1> x_fvar(x.size());
36 Matrix<double, Dynamic, 1> M_n(x.size());
37 for (
int n = 0; n < x.size(); ++n) {
38 for (
int k = 0; k < x.size(); ++k)
40 for (
int k = 0; k < x.size(); ++k)
44 gradient_dot_vector<fvar<var>,
double>(f, x_fvar, M_n, fx,
46 sum += grad_fx_dot_v.
d_;
50 for (
int i = 0; i < x.size(); ++i)
51 grad_tr_MH(i) = x_var(i).adj();
52 }
catch (
const std::exception&
e) {
fvar< T > sum(const std::vector< fvar< T > > &m)
Return the sum of the entries of the specified standard vector.
void grad_tr_mat_times_hessian(const F &f, const Eigen::Matrix< double, Dynamic, 1 > &x, const Eigen::Matrix< double, Dynamic, Dynamic > &M, Eigen::Matrix< double, Dynamic, 1 > &grad_tr_MH)
Compute the gradient of the trace of the specified matrix times the Hessian of the specified function at the specified argument, writing the result into grad_tr_MH.
static void grad(chainable *vi)
Compute the gradient for all variables starting from the specified root variable implementation.
Independent (input) and dependent (output) variables for gradients.
double e()
Return the base of the natural logarithm.
static void recover_memory_nested()
Recover only the memory used for the top nested call.
static void start_nested()
Record the current position so that recover_memory_nested() can find it.