function varargout = copulafit(family,u,varargin)
%COPULAFIT Fit a parametric copula to data.
% RHOHAT = COPULAFIT('Gaussian', U) returns an estimate RHOHAT of the matrix
% of linear correlation parameters for a Gaussian copula, given data in U. U
% is an N-by-P matrix of values in (0,1), representing N points in the
% P-dimensional unit hypercube.
%
% [RHOHAT, NUHAT] = COPULAFIT('t', U) returns an estimate RHOHAT of the matrix
% of linear correlation parameters for a t copula, and an estimate NUHAT of
% the degrees of freedom parameter, given data in U. U is an N-by-P matrix of
% values in (0,1), representing N points in the P-dimensional unit hypercube.
%
% [RHOHAT, NUHAT, NUCI] = COPULAFIT('t', U) additionally returns an
% approximate 95% confidence interval NUCI for the degrees of freedom
% parameter of a t copula, given data in U.
%
% PARAMHAT = COPULAFIT(FAMILY, U) returns an estimate PARAMHAT of the copula
% parameter for an Archimedean copula specified by FAMILY, given data in U. U
% is an N-by-2 matrix of values in (0,1), representing N points in the unit
% square. FAMILY is 'Clayton', 'Frank', or 'Gumbel'.
%
% [PARAMHAT, PARAMCI] = COPULAFIT(FAMILY, U) additionally returns an
% approximate 95% confidence interval PARAMCI for the parameter of the
% Archimedean copula specified by FAMILY, given data in U.
%
% [...] = COPULAFIT(..., 'Alpha', ALPHA) returns an approximate 100(1-ALPHA)%
% confidence interval for the parameter estimate.
%
% COPULAFIT uses maximum likelihood to fit the copula to U. When U contains
% data transformed to the unit hypercube by parametric estimates of their
% marginal cumulative distribution functions, this is known as the Inference
% Functions for Margins (IFM) method. When U contains data transformed by
% the empirical CDF, this is known as Canonical Maximum Likelihood (CML).
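%
% For example, rank-based pseudo-observations for CML can be formed from raw
% data (an illustrative sketch; X here is an N-by-P data matrix, not an input
% of this function):
%
%    n = size(X,1);
%    U = tiedrank(X) ./ (n + 1);   % column-wise empirical ranks, kept strictly in (0,1)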
%
% [...] = COPULAFIT('t', U, ..., 'Method', 'ApproximateML') fits a t copula by
% maximizing an objective function, as suggested by Bouye et al., that
% approximates the profile log-likelihood for the degrees of freedom parameter
% nu for large sample sizes. This method can be significantly faster than
% maximum likelihood; however, it should be used with caution because the
% estimates and confidence limits may not be accurate for small or moderate
% sample sizes. COPULAFIT('t', U, ..., 'Method', 'ML') is equivalent to the
% default maximum likelihood fit.
%
% [...] = COPULAFIT(..., 'Options', OPTIONS) specifies control parameters for
% the iterative algorithm used to compute estimates. This argument can be
% created by a call to STATSET. See STATSET('copulafit') for parameter names
% and default values. This argument does not apply to the 'Gaussian' family.
%
%
% See also ECDF, COPULACDF, COPULAPDF, COPULARND.
%
% References:
% [1] Bouye, E., Durrleman, V., Nikeghbali, A., Riboulet, G., Roncalli, T.
% (2000), "Copulas for Finance: A Reading Guide and Some Applications",
% Working Paper, Groupe de Recherche Operationnelle, Credit Lyonnais.
%
% Copyright 2007-2011 The MathWorks, Inc.
if nargin < 2
error(message('stats:copulafit:TooFewInputs'));
end
[~,d] = size(u);
if ndims(u)~=2 || d<2
error(message('stats:copulafit:InvalidDataDimensions'));
elseif ~all(all(0 < u & u < 1))
error(message('stats:copulafit:DataOutOfRange'));
end
families = {'gaussian','t','clayton','frank','gumbel'};
family = internal.stats.getParamVal(family,families,'FAMILY');
pnames = {'alpha' 'method', 'options'};
dflts = { .05 'ml' [] };
[alpha,method,options] = internal.stats.parseArgs(pnames, dflts, varargin{:});
if ~(isscalar(alpha) && (0 < alpha && alpha < 1))
error(message('stats:copulafit:InvalidAlpha'));
end
zcrit = norminv([alpha/2 1-alpha/2]);
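% zcrit holds the two standard normal quantiles used below to form Wald-type
% confidence intervals of the form estimate + se*zcrit.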
methods = {'ml' 'approximateml'};
method = internal.stats.getParamVal(method,methods,'METHOD');
if ~strcmp(method,'ml') && ~strcmp(family,'t')
error(message('stats:copulafit:MethodNotSupported', method));
end
options = statset(statset('copulafit'), options);
switch family
case 'gaussian'
if nargout > 1
error(message('stats:copulafit:TooManyOutputs'));
end
% Transform to z scale, and compute Rho
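% (For the Gaussian family the estimate reduces to the sample linear
% correlation of the normal scores, so no iterative optimization, and hence
% no Options argument, is used.)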
z = norminv(u);
RhoHat = corrcoef(z);
varargout{1} = RhoHat;
case 't'
if nargout > 3
error(message('stats:copulafit:TooManyOutputs'));
end
% These vars are used to save intermediate estimates of Rho or R =
% chol(Rho) between iterations of the optimization for nu, since the final
% estimate from the previous value of nu will make a good starting point
% for the new value of nu.
Rsaved = []; Rhosaved = [];
% Use FMINBND to maximize the profile likelihood for nu.
%
% Could minimize the full likelihood directly, but that tends to be slow
% because of all the calls to TINV. Also, the optimizer can have trouble
% because there's often a long flat valley, and in high dimensions that's hard
% to deal with. Better to maximize the likelihood with a two-step iteration.
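% Schematically, the outer 1-D search solves
%    nuHat = argmin over nu of [ min over Rho of negloglike_t(nu, chol(Rho), tinvLocal(u,nu)) ]
% where the inner step over Rho is handled inside profileFun and the outer
% search over nu is handled by FMINBND below.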
% NOTE: in this modified version the exact profile-likelihood objective
% (profileNLL_t) is disabled, so the approximate profile objective is used
% regardless of the 'Method' setting.
if 0 %strcmp(method,'ml')
profileFun = @profileNLL_t;
else %if strcmp(method,'approximateml')
profileFun = @approxProfileNLL_t;
end
lowerBnd = 1 + options.TolBnd; % limit d.f. to be > 1
[lowerBnd,upperBnd] = bracket1D(profileFun,lowerBnd,5); % 'upper', search ascending from 5
if ~isfinite(upperBnd)
error(message('stats:copulafit:NoUpperBndDF'));
end
opts = optimset(options);
[nuHat,~,err,output] = fminbnd(profileFun, lowerBnd, upperBnd, opts);
if (err == 0)
% fminbnd may print its own output text; in any case give something
% more statistical here, controllable via warning IDs.
if output.funcCount >= options.MaxFunEvals
warning(message('stats:copulafit:EvalLimit'));
else
warning(message('stats:copulafit:IterLimit'));
end
elseif (err < 0)
error(message('stats:copulafit:NoSolution'));
end
[~,RhoHat] = profileFun(nuHat);
RhoHat(1:(d+1):d*d) = 1; % guarantee ones along the diagonal
if (nargout > 2) && (nuHat <= lowerBnd)
warning(message('stats:copulafit:NuAtBoundary'));
end
varargout(1:2) = {RhoHat nuHat};
if nargout > 2
% This warns if the 2nd deriv is not positive.
d2 = d2profileLL_t(nuHat,profileFun);
se = sqrt(1/d2);
varargout{3} = nuHat + se*zcrit;
end
case {'clayton' 'frank' 'gumbel'}
if nargout > 2
error(message('stats:copulafit:TooManyOutputs'));
elseif d > 2
error(message('stats:copulafit:TooManyDimensions'));
end
switch family
case 'clayton' % a.k.a. Cook-Johnson
nloglf = @negloglike_clayton;
lowerBnd = options.TolBnd;
case 'frank'
nloglf = @negloglike_frank;
[~,lowerBnd] = bracket1D(nloglf,5,-5); % 'lower', search descending from -5
if ~isfinite(lowerBnd)
lowerBnd = 0; % modified behavior: fall back to zero instead of erroring
% error(message('stats:copulafit:NoLowerBnd'));
end
case 'gumbel' % a.k.a. Gumbel-Hougaard
nloglf = @negloglike_gumbel;
lowerBnd = 1 + options.TolBnd;
end
[lowerBnd,upperBnd] = bracket1D(nloglf,lowerBnd,5); % 'upper', search ascending from 5
if ~isfinite(upperBnd)
upperBnd = 1e100; % modified behavior: use a large finite bound instead of erroring
%error(message('stats:copulafit:NoUpperBnd'));
end
opts = optimset(options);
[alphaHat,~,err,output] = fminbnd(nloglf, lowerBnd, upperBnd, opts);
if (err == 0)
% fminbnd may print its own output text; in any case give something
% more statistical here, controllable via warning IDs.
if output.funcCount >= options.MaxFunEvals
warning(message('stats:copulafit:EvalLimit'));
else
warning(message('stats:copulafit:IterLimit'));
end
elseif (err < 0)
% modified behavior: the no-solution error is suppressed
% error(message('stats:copulafit:NoSolution'));
end
varargout{1} = alphaHat;
if nargout > 1
[~,d2] = nloglf(alphaHat);
se = sqrt(1/d2);
varargout{2} = alphaHat + se*zcrit;
end
end
%----------------------------------------------------------------------
%
% Log-likelihood and profile LL functions for the t copula (nested within main function)
%
function [nll,Rho,R] = profileNLL_t(nu)
% Compute the profile negative log-likelihood for a t copula at nu by
% maximizing the likelihood over R = chol(Rho).
% Transform to t scale, and compute initial estimate for Rho and its
% Cholesky factor
t_ = tinvLocal(u,nu);
if isempty(Rsaved)
Rho = corrcoef(t_);
if Rho(1,2) >= 1
Rho(1,2) = 1-1e-6;
Rho(2,1) = 1-1e-6;
elseif Rho(1,2) <= -1
Rho(1,2) = -1+1e-6;
Rho(2,1) = -1+1e-6;
end
[R,p_] = chol(Rho);
if p_ > 0 || any(isnan(diag(R)))
error(message('stats:copulafit:RhoRankDeficient'));
end
else
% Use the final estimate of R from the previous iteration as the
% starting value for this iteration, if it's available (in the
% parent's workspace).
R = Rsaved;
end
% Compute Rho using conditional maximum likelihood, given nu
funfcn = {'fungrad' 'copulafit' @(params) profileObjFun(params,nu,t_) [] []};
start_ = tovector(R);
% The tolerances need to be fairly tight here, particularly when computing
% the second derivative, since otherwise the computed profile likelihood
% can fail to be concave upward.
dfltOptions = struct('DerivativeCheck','off', 'HessMult',[], ...
'HessPattern',ones(length(start_)), 'PrecondBandWidth',Inf, ...
'TypicalX',ones(length(start_),1), 'MaxPCGIter',1, 'TolPCG',0.1, ...
'TolX',1e-8, 'TolFun',1e-8, 'MaxIter',1000, 'MaxFunEvals',5000, ...
'Display','off');
[params,nll,~,err,output] = ...
statsfminbx(funfcn, start_, -Inf(size(start_)), Inf(size(start_)), [], dfltOptions, 1);
if (err == 0)
% statsfminbx is forced to be silent, but give a warning here,
% controllable via warning IDs.
if output.funcCount >= options.MaxFunEvals
warning(message('stats:copulafit:ProfileEvalLimit', sprintf( '%g', nu )));
else
warning(message('stats:copulafit:ProfileIterLimit', sprintf( '%g', nu )));
end
elseif (err < 0)
error(message('stats:copulafit:NoProfile', sprintf( '%g', nu )));
end
R = tomatrix(params);
Rho = R'*R;
% Save the final estimate of R in the parent's workspace for use as a
% starting value in the next iteration.
Rsaved = R;
end
function [nll,Rho] = approxProfileNLL_t(nu)
% Bouye et al.'s iterative solution for Rho, given nu. This is motivated as
% a profile likelihood, although strictly it is not one; it leads to
% estimates of Rho and nu that are typically good approximations to the
% MLEs when the sample size is large enough. However, even for moderately
% large sample sizes, the criterion can have multiple minima.
% Transform to t scale, and compute initial estimate for Rho
[n_,d_] = size(u);
t_ = tinvLocal(u,nu);
if isempty(Rhosaved)
Rho = corrcoef(t_);
else
% Use the final estimate of Rho from the previous iteration as the
% starting value for this iteration, if it's available (in the
% parent's workspace).
Rho = Rhosaved;
end
if Rho(1,2) >= 1
Rho(1,2) = 1-1e-6;
Rho(2,1) = 1-1e-6;
elseif Rho(1,2) <= -1
Rho(1,2) = -1+1e-6;
Rho(2,1) = -1+1e-6;
end
% Compute Rho as a fixed point of Bouye's iteration, given nu
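% Each pass forms, with t_i the i-th row of t_ and S = [s_1; ...; s_n],
%    s_i = t_i ./ sqrt(1 + t_i*inv(Rho)*t_i'/nu)
%    Rho <- ((nu+d)/nu) * (S'*S)/n
% and then rescales Rho to have a unit diagonal.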
tol = 1e-8;
maxiter = 100;
for jj = 1:maxiter
if Rho(1,2) >= 1
Rho(1,2) = 1-1e-6;
Rho(2,1) = 1-1e-6;
elseif Rho(1,2) <= -1
Rho(1,2) = -1+1e-6;
Rho(2,1) = -1+1e-6;
end
RhoOld = Rho;
[R,p_] = chol(Rho);
if p_ > 0
error(message('stats:copulafit:RhoRankDeficient'));
end
tRinv = t_ / R;
% s_ = t_ ./ sqrt(repmat(1 + sum(tRinv.^2,2)/nu, 1, d));
s_ = bsxfun(@rdivide, t_, sqrt(1 + sum(tRinv.^2,2)/nu));
Rho = ((nu+d)./nu).*(s_'*s_)/n_;
% Renormalize at each iteration
c_ = sqrt(diag(Rho)); % sqrt first to avoid under/overflow
cc_ = c_*c_'; cc_(1:(d_+1):end) = diag(Rho); % remove roundoff on diag
Rho = Rho ./ cc_;
if Rho(1,2) >= 1
Rho(1,2) = 1-1e-6;
Rho(2,1) = 1-1e-6;
elseif Rho(1,2) <= -1
Rho(1,2) = -1+1e-6;
Rho(2,1) = -1+1e-6;
end
if norm(Rho-RhoOld) < tol*norm(Rho), break; end
end
if jj == maxiter && norm(Rho-RhoOld) >= tol*norm(Rho)
warning(message('stats:copulafit:RhoEstimateFailedConvergence'));
end
% Compute negative log-likelihood at nu and conditional Rho estimate
nll = negloglike_t(nu,chol(Rho),t_);
% Save the final estimate of Rho in the parent's workspace for use as a
% starting value in the next iteration.
Rhosaved = Rho;
end
%----------------------------------------------------------------------
%
% Log-likelihood functions for Archimedean copulas (nested within main function)
%
function [nll,d2] = negloglike_clayton(alpha)
% C(u1,u2) = (u1^(-alpha) + u2^(-alpha) - 1)^(-1/alpha)
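% The corresponding density, used below in log form, is
%    c(u1,u2) = (alpha+1) * (u1*u2)^(-(alpha+1)) * (u1^(-alpha)+u2^(-alpha)-1)^(-(2*alpha+1)/alpha)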
powu = u.^(-alpha);
lnu = log(u);
logC = (-1./alpha).*log(sum(powu, 2) - 1);
logy = log(alpha+1) + (2.*alpha+1).*logC - (alpha+1).*sum(lnu, 2);
nll = -sum(logy);
if nargout > 1
% Return approximate 2nd derivative of the neg loglikelihood,
% using -E[score^2] = E[hessian]
dlogy = 1./(1+alpha) - logC./alpha ...
+ (2+1./alpha)*sum(powu.*lnu, 2)./(sum(powu, 2) - 1) - sum(lnu, 2);
d2 = sum(dlogy.^2);
end
end
function [nll,d2,dlogy] = negloglike_frank(alpha)
% C(u1,u2) = -(1/alpha)*log(1 + (exp(-alpha*u1)-1)*(exp(-alpha*u2)-1)/(exp(-alpha)-1))
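% The corresponding density, used below in log form, is
%    c(u1,u2) = alpha*(1-exp(-alpha)) * exp(-alpha*(u1+u2))
%               / (exp(-alpha*u1)+exp(-alpha*u2)-exp(-alpha)-exp(-alpha*(u1+u2)))^2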
expau = exp(alpha .* u);
sumu = sum(u, 2);
if abs(alpha) < 1e-5
logy = 2*alpha*prod(u-.5,2); % -> zero as alpha -> 0
else
logy = log(-alpha.*expm1(-alpha)) + alpha.*sumu ...
- 2*log(abs(1 + exp(alpha.*(sumu - 1)) - sum(expau, 2)));
end
nll = -sum(logy);
if nargout > 1
% Return approximate 2nd derivative of the neg loglikelihood,
% using -E[score^2] = E[hessian]
if abs(alpha) < 1e-5
dlogy = 2*prod(u-.5,2);
else
dlogy = 1./alpha + 1./expm1(alpha) + sumu ...
- 2*((sumu-1).*exp(alpha.*(sumu-1)) - sum(u.*expau, 2)) ...
./ (1 + exp(alpha.*(sumu-1)) - sum(expau, 2));
end
d2 = sum(dlogy.^2);
end
end
function [nll,d2] = negloglike_gumbel(alpha)
% C(u1,u2) = exp(-((-log(u1))^alpha + (-log(u2))^alpha)^(1/alpha))
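% With v1 = -log(u1), v2 = -log(u2) and A = (v1^alpha + v2^alpha)^(1/alpha)
% (A = nlogC below), the corresponding density, used in log form, is
%    c(u1,u2) = exp(-A) * (u1*u2)^(-1) * (v1*v2)^(alpha-1) * A^(1-2*alpha) * (alpha-1+A)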
v = -log(u); % u is strictly in (0,1) => v strictly in (0,Inf)
v = sort(v,2); vmin = v(:,1); vmax = v(:,2); % min/max, but avoid dropping NaNs
logv = log(v);
nlogC = vmax.*(1+(vmin./vmax).^alpha).^(1./alpha);
lognlogC = log(nlogC);
logy = log(alpha - 1 + nlogC) ...
- nlogC + sum((alpha-1).*logv + v, 2) + (1-2*alpha).*lognlogC;
nll = -sum(logy);
if nargout > 1
% Return approximate 2nd derivative of the neg loglikelihood,
% using -E[score^2] = E[hessian]
dnlogC = nlogC .* (-lognlogC + sum(logv.*v.^alpha, 2)./sum(v.^alpha, 2)) ./ alpha;
dlogy = (1+dnlogC)./(alpha-1+nlogC) - dnlogC ...
+ sum(logv, 2) + (1-2*alpha)*dnlogC./nlogC - 2*lognlogC;
d2 = sum(dlogy.^2);
end
end
end
%----------------------------------------------------------------------
%
% Log-likelihood and profile LL functions for the t copula (not nested)
%
function [obj,grad] = profileObjFun(params,nu,t)
R = tomatrix(params);
obj = negloglike_t(nu, R, t);
delta = eps(class(params))^(1/4);
% Compute a two-point central difference approximation to the gradient.
if nargout == 2
% This assumes that the finite diff steps will remain within the
% parameter space. The parameterization should ensure that.
deltaparams = delta*max(abs(params), 1); % limit smallest absolute step
e = zeros(size(params));
grad = zeros(size(params));
for j = 1:length(params)
e(j) = deltaparams(j);
Rplus = tomatrix(params+e); Rminus = tomatrix(params-e);
grad(j) = negloglike_t(nu, Rplus, t) - negloglike_t(nu, Rminus, t);
e(j) = 0;
end
% Normalize by increment to get derivative estimates.
grad = grad ./ (2 * deltaparams);
end
end
function d2 = d2profileLL_t(nu,profileNLL)
% Return a numerical approximation to the 2nd derivative of the neg
% profile loglikelihood for nu. Note that this is the profile LL,
% maximizing over Rho(nu), and not a slice of the full LL along nu.
% Compute a five-point central difference approximation to
% the second derivative. Assumes nu is > 2*delta.
delta = eps(class(nu))^(1/4);
fm2 = profileNLL(nu+2*delta);
fm1 = profileNLL(nu+delta);
fm = profileNLL(nu);
fp1 = profileNLL(nu-delta);
fp2 = profileNLL(nu-2*delta);
if all(diff(diff([fm2 fm1 fm fp1 fp2])) > 0)
d2 = (-fm2 + 16*fm1 - 30*fm + 16*fp1 - fp2) / (12 * delta.^2);
else
warning(message('stats:copulafit:ProfileLLNotConcave', sprintf( '%g', nu )));
d2 = NaN(class(nu));
end
end
function nll = negloglike_t(nu, R, t)
% Compute negative log-likelihood for a t copula at nu and R = chol(Rho)
[n,d] = size(t);
% R = R ./ repmat(sqrt(sum(R.^2,1)),d,1);
R = bsxfun(@rdivide, R, sqrt(sum(R.^2,1)));
% nll = -sum(log(mvtpdf(t,R'*R,nu)) - sum(log(tpdf(t,nu)),2)),
% where t = tinvLocal(u,nu)
tRinv = t / R;
nll = - n*gammaln((nu+d)/2) + n*d*gammaln((nu+1)/2) - n*(d-1)*gammaln(nu/2) ...
+ n*sum(log(abs(diag(R)))) ...
+ ((nu+d)/2)*sum(log(1+sum(tRinv.^2, 2)./nu)) ...
- ((nu+1)/2)*sum(sum(log(1+t.^2./nu),2),1);
end
%----------------------------------------------------------------------
%
% Utility functions
%
function x = tinvLocal(p,nu)
% For small d.f., call betaincinv which uses Newton's method
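% This uses the fact that if T ~ t_nu then Z = nu/(nu+T^2) ~ Beta(nu/2,1/2),
% so the two-sided tail probability maps to Z through the incomplete beta
% function and the quantile is recovered as x = sign(q)*sqrt(nu*(1-z)/z).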
if nu < 1000
q = p - .5;
z = zeros(size(q),class(q));
oneminusz = zeros(size(q),class(q));
t = (abs(q(:)) < .25);
if any(t)
% for z close to 1, compute 1-z directly to avoid roundoff
oneminusz(t) = betaincinv(2.*abs(q(t)),0.5,nu/2,'lower');
z(t) = 1 - oneminusz(t);
end
t = ~t; % (abs(q) >= .25);
if any(t)
z(t) = betaincinv(2.*abs(q(t)),nu/2,0.5,'upper');
oneminusz(t) = 1 - z(t);
end
x = sign(q) .* sqrt(nu .* (oneminusz./z));
% For large d.f., use Abramowitz & Stegun formula 26.7.5
else
xn = norminv(p);
x = xn + (xn.^3+xn)./(4*nu) + ...
(5*xn.^5+16.*xn.^3+3*xn)./(96*nu.^2) + ...
(3*xn.^7+19*xn.^5+17*xn.^3-15*xn)./(384*nu.^3) +...
(79*xn.^9+776*xn.^7+1482*xn.^5-1920*xn.^3-945*xn)./(92160*nu.^4);
end
end
function [nearBnd,farBnd] = bracket1D(nllFun,nearBnd,farStart)
% Bracket the minimizer of a (one-parameter) negative log-likelihood function.
% nearBnd is a point known to be a lower/upper bound for the minimizer; it
% will be updated to tighten the bound if possible. farStart is the first
% trial point tested to see whether it is an upper/lower bound for the
% minimizer. farBnd will be the desired upper/lower bound.
bound = farStart;
upperLim = 1e12; % arbitrary finite limit for search
oldnll = nllFun(bound);
oldbound = bound;
while abs(bound) <= upperLim
bound = 2*bound; % assumes lower start is < 0, upper is > 0
nll = nllFun(bound);
if nll > oldnll
% The neg loglikelihood increased, we're on the far side of the
% minimum, so the current point is the desired far bound.
farBnd = bound;
break;
else
% The neg loglikelihood continued to decrease, so the previous point
% is on the near side of the minimum, update the near bound.
nearBnd = oldbound;
end
oldnll = nll;
oldbound = bound;
end
if abs(bound) > upperLim
farBnd = NaN;
end
end
function b = tovector(A)
% Convert the Cholesky factor of a correlation matrix to upper triangle vector
% form. Columns of the Cholesky factor have unit norm, so they reduce to
% direction angles, consisting of one fewer element.
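% Parameterizing each unit-norm column by direction angles lets the optimizer
% work on an unconstrained parameter vector while R'*R remains a valid
% correlation matrix.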
m = size(A,1);
Angles = zeros(m);
for i = 1:m-1
Angles(i,:) = atan2(A(i,:),A(i+1,:));
A(i+1,:) = A(i+1,:) ./ cos(Angles(i,:));
end
b = Angles(triu(true(m),1));
end
function A = tomatrix(b)
% Convert the Cholesky factor of a correlation matrix from upper triangle vector
% form. Columns of the Cholesky factor have unit norm, so the columns can be
% recreated from direction angles.
m = (1 + sqrt(1+8*length(b)))/2;
Cosines = zeros(m);
Cosines(triu(true(m),1)) = cos(b);
Sines = ones(m);
Sines(triu(true(m),1)) = sin(b);
flip = m:-1:1;
prodSines = cumprod(Sines(flip,:)); prodSines = prodSines(flip,:);
A = [ones(1,m); Cosines(1:m-1,:)] .* prodSines;
% A = [ones(1,m); Cosines(1:m-1,:)] .* flipud(cumprod(flipud(Sines)));
end