LinearPrediction.h
#ifndef LINEAR_PREDICTION
#define LINEAR_PREDICTION

#include <complex>
#include <iostream>

#include <Eigen/Dense>

using namespace std;
using namespace Eigen;

// NOTE: 'lout' is presumably a project-wide log stream defined elsewhere; it
// is aliased to std::cout here (an assumption) so that this header compiles
// on its own.
static ostream &lout = cout;
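// Strategies for eigenvalues with |lambda| > 1, which would make the
// extrapolation diverge: set them to zero, rescale them onto the unit circle,
// or reflect them into the unit circle via lambda -> 1/conj(lambda).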
enum DEAL_WITH_DIV_EIGENVALUES {SET_ZERO=0, NORMALIZE=1, INVERT=2};
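// Moore-Penrose pseudoinverse via SVD: singular values below the cutoff
// 'delta' are treated as zero, the rest are inverted. (With full U and V the
// reconstruction below assumes a square input, which is all it is used for
// here.)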
MatrixXd pseudoInv (const MatrixXd &Min, double delta=1e-6)
{
JacobiSVD<MatrixXd> Jack(Min,ComputeFullU|ComputeFullV);
VectorXd Sinv = Jack.singularValues();
for (long i=0; i<Sinv.rows(); ++i)
{
if (Sinv(i) > delta)
{
Sinv(i) = 1./Sinv(i);
}
else
{
Sinv(i) = 0;
}
}
return Jack.matrixV() * Sinv.asDiagonal() * Jack.matrixU().transpose();
}
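// Linear prediction: fit coefficients a(j) on the second half of the signal x
// such that x(n) ~ -sum_j a(j)*x(n-j-1), then extrapolate x by N_new points.
// Eigenvalues of the companion matrix with modulus > 1 are treated according
// to VARIANT; delta is reserved for a pseudoinverse-based solver (currently
// disabled, see below).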
VectorXd linearPrediction (const VectorXd &x, int N_new, DEAL_WITH_DIV_EIGENVALUES VARIANT=SET_ZERO, double delta=0.)
{
int L = x.rows()/2;
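// least-squares normal equations built from the second half of the signal:
// autocorrelation matrix R(i,j) = sum_{n=L}^{N-1} x(n-i-1)*x(n-j-1)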
MatrixXd R(L,L);
R.setZero();
for (int i=0; i<L; ++i)
for (int j=0; j<L; ++j)
for (int n=L; n<x.rows(); ++n)
{
R(i,j) += x(n-i-1)*x(n-j-1);
}
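// cross-correlation vector r(i) = -sum_{n=L}^{N-1} x(n-i-1)*x(n)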
VectorXd r(L);
r.setZero();
for (int i=0; i<L; ++i)
for (int n=L; n<x.rows(); ++n)
{
r(i) -= x(n-i-1)*x(n);
}
VectorXd a;
// Solve the normal equations R*a = r for the prediction coefficients with a
// full-pivoting Householder QR decomposition. An alternative solve via the
// pseudoinverse, a = pseudoInv(R,delta)*r, turned out to work badly even for
// simple cosine data and is therefore disabled:
a = R.fullPivHouseholderQr().solve(r);
// Optional sanity check: reconstruct the known second half of x from the
// fitted coefficients and compare term by term:
// for (int n=x.rows()/2; n<x.rows(); ++n)
// {
// 	double xx = 0.;
// 	for (int j=0; j<L; ++j) xx -= a(j)*x(n-j-1);
// 	cout << x(n) << "\t" << xx << endl;
// }
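// companion matrix of the prediction polynomial: ones on the subdiagonal,
// -a in the first row; applying M to the window [x(n-1),...,x(n-L)] yields
// [x(n),...,x(n-L+1)], so its eigenvalues govern the extrapolation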
MatrixXd M(L,L);
M.setZero();
M.diagonal<-1>().setConstant(1.);
M.row(0) = -a.transpose();
EigenSolver<MatrixXd> Eugen(M);
VectorXcd lambda = Eugen.eigenvalues();
int N_div = 0;
for (int i=0; i<lambda.rows(); ++i)
{
// deal with divergent eigenvalues:
if (abs(lambda(i))>1)
{
lout << "i=" << i << "\tlambda(i)=" << lambda(i) << ", abs=" << abs(lambda(i)) << ", VARIANT=" << VARIANT << endl;
if (VARIANT==SET_ZERO)
{
lout << "setting to zero!" << endl;
lambda(i) = 0;
}
else if (VARIANT==NORMALIZE)
{
lout << "normalizing!" << endl;
lambda(i) = lambda(i)/abs(lambda(i));
}
else
{
lout << "inverting!" << endl;
lambda(i) = 1./conj(lambda(i));
}
++N_div;
}
}
cout << "#divergent eigenvalues: " << N_div << endl;
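// expand the initial window [x(L-1),...,x(0)] in the eigenbasis of M; the
// first row of the eigenvector matrix extracts the newest entry after
// propagation (the mixed double/complex product requires Eigen >= 3.3)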
VectorXcd xl = Eugen.eigenvectors().row(0);
VectorXcd xr = Eugen.eigenvectors().inverse() * x.head(L).reverse();
VectorXd Vout(N_new);
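// propagate in the eigenbasis: the extrapolated value x(n) is obtained as
// xl^T * Lambda^(n-L+1) * xr; its imaginary part vanishes up to numerical
// noise, so only the real part is kept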
for (int k=x.rows()-L; k<x.rows()-L+N_new; ++k)
{
complex<double> x_new = xl.transpose() * lambda.array().pow(k+1).matrix().asDiagonal() * xr;
Vout(k-x.rows()+L) = x_new.real();
}
return Vout;
}
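// convenience wrapper: extrapolate x by N_new points and append them in place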
void insert_linearPrediction (VectorXd &x, int N_new, DEAL_WITH_DIV_EIGENVALUES VARIANT=SET_ZERO, double delta=0.)
{
VectorXd pred = linearPrediction(x,N_new,VARIANT,delta);
x.conservativeResize(x.rows()+N_new);
x.tail(N_new) = pred;
}
#endif
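
/*
A minimal usage sketch (illustration only, not part of the original header):
extrapolate a damped cosine by 50 points. Assumes Eigen is available and this
file is saved as "LinearPrediction.h".

#include <cmath>
#include "LinearPrediction.h"

int main()
{
	VectorXd x(100);
	for (int n=0; n<100; ++n) x(n) = exp(-0.01*n) * cos(0.3*n);

	insert_linearPrediction(x, 50, NORMALIZE); // x now holds 150 points
	cout << x.tail(5).transpose() << endl;
	return 0;
}
*/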