Commit a7afacaf authored by Dirk Eddelbuettel

Import Upstream version 1.3-15

parent a2359bc7
Package: mgcv
Version: 1.3-13
Version: 1.3-15
Author: Simon Wood <simon.wood@r-project.org>
Maintainer: Simon Wood <simon.wood@r-project.org>
Title: GAMs with GCV smoothness estimation and GAMMs by REML/PQL
......@@ -7,9 +7,9 @@ Description: Routines for GAMs and other generalized ridge regression
with multiple smoothing parameter selection by GCV or UBRE.
Also GAMMs by REML or PQL. Includes a gam() function.
Priority: recommended
Depends: R (>= 2.0.0)
Depends: R (>= 2.3.0)
Imports: graphics, stats
Suggests: nlme (>= 3.1-64), MASS (>= 7.2-2)
LazyLoad: yes
License: GPL version 2 or later
Packaged: Mon Jan 23 17:09:30 2006; simon
Packaged: Wed Apr 12 11:47:35 2006; simon
useDynLib(mgcv)
useDynLib(mgcv, .registration = TRUE, .fixes = "C_")
export(
anova.gam,coef.pdIdnot,coef.pdTens,
......
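The new useDynLib directive above is what creates the C_-prefixed symbol objects used by the .C calls in the R code below. A minimal sketch of inspecting this from an installed copy of the package (illustrative only; getDLLRegisteredRoutines and asNamespace are standard base R functions, not part of this diff):

library(mgcv)
## routines registered via R_registerRoutines() in the new C init file are listed here...
getDLLRegisteredRoutines("mgcv")$.C
## ...and, because of .fixes = "C_", each is also bound in the namespace as C_<name>,
## so calls of the form .C("mgcv", ..., PACKAGE = "mgcv") become .C(C_mgcv, ...).
exists("C_mgcv", envir = asNamespace("mgcv"))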
......@@ -137,7 +137,7 @@ rep(1, nobs), start = NULL, etastart = NULL,
if (sum(good)<ncol(x)) stop("Not enough informative observations.")
oo<-.C("update_beta",as.double(x[good,]),as.double(Sr),as.double(unlist(rS)),as.double(sp),
oo<-.C(C_update_beta,as.double(x[good,]),as.double(Sr),as.double(unlist(rS)),as.double(sp),
as.double(w),
as.double(w1),as.double(z),as.double(z1),as.integer(ncol(Sr)),
rSncol=as.integer(unlist(lapply(rS,ncol))),m=as.integer(length(rS)),
......@@ -145,7 +145,7 @@ rep(1, nobs), start = NULL, etastart = NULL,
q=as.integer(ncol(x)),get.trA=as.integer(0),as.integer(deriv),
rank.tol= as.double(.Machine$double.eps),
beta=as.double(upe$beta),trA=as.double(upe$trA),beta1=as.double(upe$beta1),
trA1=as.double(upe$trA1),rV=as.double(rV),rank=as.integer(1),PACKAGE="mgcv")
trA1=as.double(upe$trA1),rV=as.double(rV),rank=as.integer(1))
upe$beta <- oo$beta;
upe$beta1 <- matrix(oo$beta1,oo$q,oo$m)
......@@ -247,7 +247,7 @@ rep(1, nobs), start = NULL, etastart = NULL,
## Now do a final update.eta call to get trA and rV (rV%*%t(rV) is the
## posterior covariance matrix)....
oo<-.C("update_beta",as.double(x[good,]),as.double(Sr),as.double(unlist(rS)),as.double(sp),
oo<-.C(C_update_beta,as.double(x[good,]),as.double(Sr),as.double(unlist(rS)),as.double(sp),
as.double(w),
as.double(w1),as.double(z),as.double(z1),as.integer(ncol(Sr)),
rSncol=as.integer(unlist(lapply(rS,ncol))),m=as.integer(length(rS)),
......@@ -255,7 +255,7 @@ rep(1, nobs), start = NULL, etastart = NULL,
q=as.integer(ncol(x)),get.trA=as.integer(1),as.integer(deriv),
rank.tol= as.double(.Machine$double.eps),
beta=as.double(upe$beta),trA=as.double(upe$trA),beta1=as.double(upe$beta1),
trA1=as.double(upe$trA1),rV=as.double(rV),rank=as.integer(1),PACKAGE="mgcv")
trA1=as.double(upe$trA1),rV=as.double(rV),rank=as.integer(1))
rV <- matrix(oo$rV,ncol(x),ncol(x))
upe$beta <- oo$beta;
......
## R routines for the package mgcv (c) Simon Wood 2000-2005
## R routines for the package mgcv (c) Simon Wood 2000-2006
## With contributions from Henric Nilsson
mono.con<-function(x,up=TRUE,lower=NA,upper=NA)
......@@ -17,8 +17,8 @@ mono.con<-function(x,up=TRUE,lower=NA,upper=NA)
A<-matrix(0,4*(n-1)+lo+hi,n)
b<-array(0,4*(n-1)+lo+hi)
if (lo*hi==1&&lower>=upper) stop("lower bound >= upper bound in call to mono.con()")
oo<-.C("RMonoCon",as.double(A),as.double(b),as.double(x),as.integer(control),as.double(lower),
as.double(upper),as.integer(n),PACKAGE="mgcv")
oo<-.C(C_RMonoCon,as.double(A),as.double(b),as.double(x),as.integer(control),as.double(lower),
as.double(upper),as.integer(n))
A<-matrix(oo[[1]],dim(A)[1],dim(A)[2])
b<-array(oo[[2]],dim(A)[1])
list(A=A,b=b)
......@@ -30,7 +30,7 @@ uniquecombs<-function(x) {
if (is.null(x)) stop("x is null")
if (is.null(nrow(x))) stop("x has no row attribute")
if (is.null(ncol(x))) stop("x has no col attribute")
res<-.C("RuniqueCombs",as.double(x),as.integer(nrow(x)),as.integer(ncol(x)),PACKAGE="mgcv")
res<-.C(C_RuniqueCombs,as.double(x),as.integer(nrow(x)),as.integer(ncol(x)))
n<-res[[2]]*res[[3]]
x<-matrix(res[[1]][1:n],res[[2]],res[[3]])
x
......@@ -115,9 +115,9 @@ pcls <- function(M)
if (M$off[i]+df[i]-1>nar[2]) stop(paste("M$S[",i,"] is too large given M$off[",i,"]",sep=""))
}
o<-.C("RPCLS",as.double(M$X),as.double(M$p),as.double(M$y),as.double(M$w),as.double(M$Ain),as.double(M$bin)
o<-.C(C_RPCLS,as.double(M$X),as.double(M$p),as.double(M$y),as.double(M$w),as.double(M$Ain),as.double(M$bin)
,as.double(M$C),as.double(H),as.double(Sa),as.integer(M$off),as.integer(df),as.double(M$sp),
as.integer(length(M$off)),as.integer(nar),PACKAGE="mgcv")
as.integer(length(M$off)),as.integer(nar))
array(o[[2]],length(M$p))
}
......@@ -212,13 +212,13 @@ mgcv<-function(y,X,sp,S,off,C=NULL,w=rep(1,length(y)),H=NULL,scale=1,gcv=TRUE,co
sdiag<-array(0.0,2*direct.mesh) # array for gcv/ubre vs edf diagnostics
if (is.null(control$target.edf)) control$target.edf<- -1 # set to signal no target edf
oo<-.C("mgcv",as.double(y),as.double(X),as.double(C),as.double(w^2),as.double(Sa),
oo<-.C(C_mgcv,as.double(y),as.double(X),as.double(C),as.double(w^2),as.double(Sa),
as.double(p),as.double(sp),as.integer(off-1),as.integer(df),as.integer(m),
as.integer(n),as.integer(q),as.integer(C.r),as.double(scale),as.double(Vp),
as.double(edf),as.double(control$conv.tol),as.integer(control$max.half),as.double(ddiag),
as.integer(idiag),as.double(sdiag),as.integer(direct.mesh),as.double(control$min.edf),
as.double(gcv.ubre),as.double(control$target.edf),as.integer(fixed.sp),
as.double(hat),PACKAGE="mgcv")
as.double(hat))
p<-matrix(oo[[6]],q,1);
scale<-oo[[14]]
......@@ -561,10 +561,10 @@ smooth.construct.tp.smooth.spec<-function(object,data,knots)
Xu<-x
C<-array(0,k)
nXu<-0
oo<-.C("construct_tprs",as.double(x),as.integer(object$dim),as.integer(n),as.double(knt),as.integer(nk),
oo<-.C(C_construct_tprs,as.double(x),as.integer(object$dim),as.integer(n),as.double(knt),as.integer(nk),
as.integer(object$p.order),as.integer(object$bs.dim),X=as.double(X),S=as.double(S),
UZ=as.double(UZ),Xu=as.double(Xu),n.Xu=as.integer(nXu),C=as.double(C),
max.knots=as.integer(mtk),PACKAGE="mgcv")
max.knots=as.integer(mtk))
object$X<-matrix(oo$X,n,k) # model matrix
if (object$by!="NA") # deal with "by" variable
{ by <- get.var(object$by,data)
......@@ -631,9 +631,9 @@ smooth.construct.cr.smooth.spec<-function(object,data,knots)
stop(msg)
}
oo <- .C("construct_cr",as.double(x),as.integer(nx),as.double(k),
oo <- .C(C_construct_cr,as.double(x),as.integer(nx),as.double(k),
as.integer(nk),as.double(X),as.double(S),
as.double(C),as.integer(control),PACKAGE="mgcv")
as.double(C),as.integer(control))
object$X <- matrix(oo[[5]],nx,nk)
if (object$by!="NA") # deal with "by" variable
......@@ -806,9 +806,9 @@ Predict.matrix.cr.smooth<-function(object,data)
nk<-object$bs.dim
X <- rep(0,nx*nk);S<-rep(0,nk*nk);C<-rep(0,nk);control<-0
oo <- .C("construct_cr",as.double(x),as.integer(nx),as.double(object$xp),
oo <- .C(C_construct_cr,as.double(x),as.integer(nx),as.double(object$xp),
as.integer(object$bs.dim),as.double(X),as.double(S),
as.double(C),as.integer(control),PACKAGE="mgcv")
as.double(C),as.integer(control))
X<-matrix(oo[[5]],nx,nk) # the prediction matrix
if (object$by!="NA") # deal with "by" variable
{ by <- get.var(object$by,data)
......@@ -841,10 +841,9 @@ Predict.matrix.tprs.smooth<-function(object,data)
} else
{ by<-0;by.exists<-FALSE}
X<-matrix(0,n,object$bs.dim)
oo<-.C("predict_tprs",as.double(x),as.integer(object$dim),as.integer(n),as.integer(object$p.order),
oo<-.C(C_predict_tprs,as.double(x),as.integer(object$dim),as.integer(n),as.integer(object$p.order),
as.integer(object$bs.dim),as.integer(object$null.space.dim),as.double(object$Xu),
as.integer(nrow(object$Xu)),as.double(object$UZ),as.double(by),as.integer(by.exists),X=as.double(X)
,PACKAGE="mgcv")
as.integer(nrow(object$Xu)),as.double(object$UZ),as.double(by),as.integer(by.exists),X=as.double(X))
X<-matrix(oo$X,n,object$bs.dim)
}
......@@ -3059,8 +3058,8 @@ exclude.too.far<-function(g1,g2,d1,d2,dist)
if (m!=length(d2)) stop("data vectors are of different lengths")
if (dist<0) stop("supplied dist negative")
distance<-array(0,n)
o<-.C("MinimumSeparation",as.double(g1),as.double(g2),as.integer(n),as.double(d1),as.double(d2),as.integer(m),
distance=as.double(distance),PACKAGE="mgcv")
o<-.C(C_MinimumSeparation,as.double(g1),as.double(g2),as.integer(n),as.double(d1),as.double(d2),
as.integer(m),distance=as.double(distance))
res<-rep(FALSE,n)
res[o$distance > dist] <-TRUE
res
......@@ -3276,7 +3275,6 @@ vis.gam <- function(x,view=NULL,cond=list(),n.grid=30,too.far=0,col=NA,color="he
# From here on is the code for magic.....
mroot <- function(A,rank=NULL,method="chol")
# finds the smallest square root of A, or the best approximate square root of
# given rank. B is returned where BB'=A. A assumed non-negative definite.
......@@ -3298,7 +3296,9 @@ mroot <- function(A,rank=NULL,method="chol")
return(t(t(um$u[,1:rank])*as.vector(d))) # note recycling rule used for efficiency
} else
if (method=="chol")
{ L<-chol(A,pivot=TRUE)
{ op<-options(warn=-1) ## don't want to be warned it's not +ve def
L<-chol(A,pivot=TRUE)
options(op) ## reset default warnings
piv<-order(attr(L,"pivot"))
if (is.null(rank)) rank<-attr(L,"rank")
L<-L[,piv];L<-t(L[1:rank,])
......@@ -3493,10 +3493,9 @@ magic <- function(y,X,sp,S,off,rank=NULL,H=NULL,C=NULL,w=NULL,gamma=1,scale=1,gc
icontrol[7]<-control$maxit
b<-array(0,icontrol[3])
# argument names in call refer to returned values.
um<-.C("magic",as.double(y),as.double(X),sp=as.double(sp),as.double(def.sp),as.double(Si),as.double(H),
um<-.C(C_magic,as.double(y),as.double(X),sp=as.double(sp),as.double(def.sp),as.double(Si),as.double(H),
score=as.double(gamma),scale=as.double(scale),info=as.integer(icontrol),as.integer(cS),
as.double(control$rank.tol),rms.grad=as.double(control$tol),b=as.double(b),rV=double(q*q),
PACKAGE="mgcv")
as.double(control$rank.tol),rms.grad=as.double(control$tol),b=as.double(b),rV=double(q*q))
res<-list(b=um$b,scale=um$scale,score=um$score,sp=um$sp)
res$rV<-matrix(um$rV[1:(um$info[1]*q)],q,um$info[1])
gcv.info<-list(full.rank=full.rank,rank=um$info[1],fully.converged=as.logical(um$info[2]),
......
1.3-15
* chol(A,pivot=TRUE) now (R 2.3.0) generates a warning if `A' is not +ve definite.
`mroot' modified to suppress this (since it only calls `chol(A,pivot=TRUE)'
because `A' is usually +ve semi-definite).
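  A minimal sketch of the warning being suppressed (illustrative only; any
  +ve semi-definite `A' will do):

  A <- tcrossprod(matrix(rnorm(12),4,3)) ## 4 x 4, rank 3, so only +ve semi-definite
  op <- options(warn=-1)                 ## silence the rank-deficiency warning from chol
  L <- chol(A,pivot=TRUE)
  options(op)                            ## restore the previous warning setting
  attr(L,"rank")                         ## pivoted factorization still reports rank 3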
1.3-14
* mat.c:mgcv_symeig modified to allow selection of the LAPACK routine
actually used: dsyevd is the routine used previously, and seems very
reliable. dsyevr is the faster, smaller, more modern version, which it
seems possible to break... rest of code still calls dsyevd.
* Symbol registration added (thanks largely to Brian Ripley). Version
depends on R >= 2.3.0
1.3-13
* some doc changes
......
citHeader("2000 for basics of set up and s.p. estimation;
2003 for thin plate regression splines etc; 2004 for current default
algorithms and basics of gamm.")
citHeader("2000 for basics set up and old smoothness selection method;
2003 for thin plate regression splines etc; 2004 for newer, default,
methods and basics of gamm; 2006 for overview.")
citEntry(
entry="Article",
......@@ -45,5 +45,13 @@ citEntry(
American Statistical Association. 99:673-686."
)
citEntry(
entry="Book",
title="Generalized Additive Models: An Introduction with R",
year="2006",
author="S.N Wood",
publisher="Chapman and Hall/CRC",
textVersion="Wood, S.N. (2006) Generalized Additive Models: An
Introduction with R. Chapman and Hall/CRC. "
)
......@@ -228,7 +228,9 @@ them above and below by an effective infinity and effective zero. See
}
\seealso{\code{\link{te}}, \code{\link{s}}, \code{\link{predict.gam}},
\seealso{\code{\link{magic}} for an alternative for correlated data,
\code{\link{te}}, \code{\link{s}},
\code{\link{predict.gam}},
\code{\link{plot.gam}}, \code{\link{summary.gam}}, \code{\link{gam.neg.bin}},
\code{\link{vis.gam}},\code{\link{pdTens}},\code{\link{gamm.setup}}
}
......
......@@ -57,7 +57,7 @@ problem. If \eqn{\bf b}{b} is the parameter vector then the parameters are force
square root of the inverse of the covariance matrix of \code{y}, specifically
\eqn{ {\bf V}_y^{-1} = {\bf w}^\prime{\bf w}}{V_y^{-1}=w'w}. If \code{w} is an array then
it is taken as the diagonal of this matrix, or simply the weight for each element of
\code{y}.}
\code{y}. See below for an example using this.}
\item{gamma}{is an inflation factor for the model degrees of freedom in the GCV or UBRE
score.}
......@@ -179,26 +179,56 @@ generalized additive models. J. Amer. Statist. Ass. 99:673-686
\code{\link{gam}},
}
\examples{
library(mgcv)
set.seed(1);n<-400;sig2<-4
x0 <- runif(n, 0, 1);x1 <- runif(n, 0, 1)
x2 <- runif(n, 0, 1);x3 <- runif(n, 0, 1)
f <- 2 * sin(pi * x0)
f <- f + exp(2 * x1) - 3.75887
f <- f+0.2*x2^11*(10*(1-x2))^6+10*(10*x2)^3*(1-x2)^10
e <- rnorm(n, 0, sqrt(sig2))
y <- f + e
# set up additive model
G<-gam(y~s(x0)+s(x1)+s(x2)+s(x3),fit=FALSE)
# fit using magic
mgfit<-magic(G$y,G$X,G$sp,G$S,G$off,G$rank,C=G$C)
# and fit using gam as consistency check
b<-gam(G=G)
mgfit$sp;b$sp # compare smoothing parameter estimates
edf<-magic.post.proc(G$X,mgfit,G$w)$edf # extract e.d.f. per parameter
# get termwise e.d.f.s
twedf<-0;for (i in 1:4) twedf[i]<-sum(edf[((i-1)*10+1):(i*10)])
twedf;b$edf # compare
## Use `magic' for a standard additive model fit ...
library(mgcv)
set.seed(1);n <- 400;sig2 <- 4
x0 <- runif(n, 0, 1);x1 <- runif(n, 0, 1)
x2 <- runif(n, 0, 1);x3 <- runif(n, 0, 1)
f <- 2 * sin(pi * x0)
f <- f + exp(2 * x1) - 3.75887
f <- f+0.2*x2^11*(10*(1-x2))^6+10*(10*x2)^3*(1-x2)^10
e <- rnorm(n, 0, sqrt(sig2))
y <- f + e
## set up additive model
G <- gam(y~s(x0)+s(x1)+s(x2)+s(x3),fit=FALSE)
## fit using magic
mgfit <- magic(G$y,G$X,G$sp,G$S,G$off,G$rank,C=G$C)
## and fit using gam as consistency check
b <- gam(G=G)
mgfit$sp;b$sp # compare smoothing parameter estimates
edf <- magic.post.proc(G$X,mgfit,G$w)$edf # extract e.d.f. per parameter
## get termwise e.d.f.s
twedf <- 0;for (i in 1:4) twedf[i] <- sum(edf[((i-1)*10+1):(i*10)])
twedf;b$edf # compare
## Now a correlated data example ...
library(nlme)
## simulate truth
set.seed(1);n<-400;sig<-2
x <- 0:(n-1)/(n-1)
f <- 0.2*x^11*(10*(1-x))^6+10*(10*x)^3*(1-x)^10-1.396
## produce scaled covariance matrix for AR1 errors...
V <- corMatrix(Initialize(corAR1(.6),data.frame(x=x)))
Cv <- chol(V) # t(Cv)\%*\%Cv=V
## Simulate AR1 errors ...
e <- t(Cv)\%*\%rnorm(n,0,sig) # so cov(e) = V * sig^2
## Observe truth + AR1 errors
y <- f + e
## GAM ignoring correlation
par(mfrow=c(1,2))
b <- gam(y~s(x,k=20))
plot(b);lines(x,f-mean(f),col=2);title("Ignoring correlation")
## Fit smooth, taking account of *known* correlation...
w <- solve(t(Cv)) # V^{-1} = w'w
## Use `gam' to set up model for fitting...
G <- gam(y~s(x,k=20),fit=FALSE)
## fit using magic, with weight *matrix*
mgfit <- magic(G$y,G$X,G$sp,G$S,G$off,G$rank,C=G$C,w=w)
## Modify previous gam object using new fit, for plotting...
mg.stuff <- magic.post.proc(G$X,mgfit,w)
b$edf <- mg.stuff$edf;b$Vp <- mg.stuff$Vb
b$coefficients <- mgfit$b
plot(b);lines(x,f-mean(f),col=2);title("Known correlation")
}
......
......@@ -18,8 +18,9 @@ ridge regression and penalized linearly constrained least squares are also provi
in use to the S functions of the same name designed by Trevor Hastie.
However the underlying representation and estimation of the models is based on a
penalized regression spline approach, with automatic smoothness selection. A
number of other functions such as \code{\link{summary.gam}} and \code{\link{anova.gam}} are also provided, for
extracting information from a fitted \code{\link{gamObject}}.
number of other functions such as \code{\link{summary.gam}} and
\code{\link{anova.gam}} are also provided,
for extracting information from a fitted \code{\link{gamObject}}.
Use of \code{\link{gam}} is much like use of \code{\link{glm}}, except that
within a \code{gam} model formula, isotropic smooths of any number of predictors can be specified using
......@@ -45,7 +46,10 @@ always good in this case.
The package also provides a generalized additive mixed modelling function,
\code{\link{gamm}}, based on \code{glmmPQL} from the \code{MASS} library and
\code{lme} from the \code{nlme} library.
\code{lme} from the \code{nlme} library. \code{gamm} is particularly useful
for modelling correlated data (i.e. where a simple independence model for the
residual variation is inappropriate). In addition, the low level routine \code{\link{magic}}
can fit models to data with a known correlation structure.
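A minimal sketch of the \code{gamm} route for autocorrelated residuals
(illustrative only; \code{corAR1} comes from the nlme package and the
simulated data are purely for demonstration):

library(mgcv); library(nlme)
set.seed(2); n <- 200; x <- (1:n)/n
e <- as.numeric(arima.sim(list(ar=0.6), n=n))  ## AR1 errors
y <- sin(2*pi*x) + e
b <- gamm(y ~ s(x), correlation=corAR1())      ## residual correlation estimated by lme
summary(b$gam)
## for a *known* correlation structure, see the example in the magic() help instead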
Some of the underlying GAM fitting methods are available as low level fitting
functions: see \code{\link{magic}} and \code{\link{mgcv}}. Penalized weighted
......@@ -57,6 +61,9 @@ For a complete list of functions type \code{library(help=mgcv)}.
\author{
Simon Wood <simon.wood@r-project.org>
with contributions and/or help from Kurt Hornik, Mike Lonergan, Henric Nilsson
and Brian Ripley.
Maintainer: Simon Wood <simon.wood@r-project.org>
}
......
......@@ -30,6 +30,19 @@ problems into ordinary regression problems.}
\author{ Simon N. Wood \email{simon.wood@r-project.org}}
\examples{
set.seed(0)
a <- matrix(runif(24),6,4)
A <- a\%*\%t(a) ## A is +ve semi-definite, rank 4
B <- mroot(A) ## default pivoted choleski method
tol <- 100*.Machine$double.eps
chol.err <- max(abs(A-B\%*\%t(B)));chol.err
if (chol.err>tol) warning("mroot (chol) suspect")
B <- mroot(A,method="svd") ## svd method
svd.err <- max(abs(A-B\%*\%t(B)));svd.err
if (svd.err>tol) warning("mroot (svd) suspect")
}
\keyword{models} \keyword{smooth} \keyword{regression}%-- one or more ..
......
/* Symbol registration initialization: original provided by Brian Ripley.
Anything called from R should be registered here. (See also NAMESPACE:1)
*/
#include <R.h>
#include <Rinternals.h>
#include <R_ext/Rdynload.h>
#include "mgcv.h"
R_CMethodDef CEntries[] = {
{"update_beta", (DL_FUNC) &update_beta, 22},
{"RMonoCon", (DL_FUNC) &RMonoCon, 7},
{"RuniqueCombs", (DL_FUNC) &RuniqueCombs, 3},
{"RPCLS", (DL_FUNC) &RPCLS, 14},
{"mgcv", (DL_FUNC) &mgcv, 27},
{"construct_tprs", (DL_FUNC) &construct_tprs, 14},
{"construct_cr", (DL_FUNC) &construct_cr, 8},
{"predict_tprs", (DL_FUNC) &predict_tprs, 12},
{"MinimumSeparation", (DL_FUNC) &MinimumSeparation, 7},
{"magic", (DL_FUNC) &magic, 14},
{NULL, NULL, 0}
};
void R_init_mgcv(DllInfo *dll)
{
R_registerRoutines(dll, CEntries, NULL, NULL, NULL);
R_useDynamicSymbols(dll, FALSE);
}
......@@ -663,7 +663,7 @@ void magic(double *y,double *X,double *sp,double *def_sp,double *S,double *H,dou
*/
{ int *pi,*pivot,q,n,autoinit,left,ScS,m,i,j,tp,k,use_sd=0,rank,converged,iter=0,ok,
gcv,try,fit_call=0,step_fail=0,max_half,*spok,/* *dir_sp,*/maxit,def_supplied;
gcv,try,fit_call=0,step_fail=0,max_half,*spok,/* *dir_sp,*/maxit,def_supplied,use_dsyevd=1;
double *p,*p1,*p2,*tau,xx,*y1,*y0,yy,**Si=NULL,*work,score,*sd_step,*n_step,*U1,*V,*d,**M,**K,
*VS,*U1U1,**My,**Ky,**yK,*dnorm,*ddelta,**d2norm,**d2delta,norm,delta,*grad,**hess,*nsp,
min_score,*step,d_score=1e10,*ev=NULL,*u,msg=0.0,Xms,*rSms,*bag,*bsp,sign;
......@@ -843,7 +843,7 @@ void magic(double *y,double *X,double *sp,double *def_sp,double *S,double *H,dou
U1,V,d,y1,rank,q,m,cS,gcv,gamma,scale,norm,delta,n);
/* Now get the search directions */
for (i=0;i<m;i++) for (j=0;j<m;j++) u[i+m*j]=hess[i][j];
mgcv_symeig(u,ev,&m); /* columns of hess are now eigen-vectors */
mgcv_symeig(u,ev,&m,&use_dsyevd); /* columns of hess are now eigen-vectors */
use_sd=0;for (p=ev;p<ev+m;p++) if (*p<0.0) {use_sd=1;break;} /* check hessian +ve def */
if (!use_sd) /* get the Newton direction Hess^{-1}grad */
{ for (i=0;i<m;i++) { for (xx=0.0,j=0;j<m;j++) xx+=u[j+m*i]*grad[j];sd_step[i]=xx/ev[i];}
......
......@@ -265,26 +265,49 @@ qr.R(qr(Xa,tol=0))
free(x);free(work);
}
void mgcv_symeig(double *A,double *ev,int *n)
void mgcv_symeig(double *A,double *ev,int *n,int *use_dsyevd)
/* gets eigenvalues and vectors of symmetric matrix A.
Two alternative underlying LAPACK routines can be used,
either dsyevd (slower, robust) or dsyevr (faster, seems less robust).
Vectors returned in columns of A, values in ev.
Testing R code....
library(mgcv)
n<-4;A<-matrix(rnorm(n*n),n,n);A<-A%*%t(A);d<-array(0,n)
er<-eigen(A)
um<-.C("mgcv_symeig",as.double(A),as.double(d),as.integer(n),PACKAGE="mgcv")
um<-.C("mgcv_symeig",as.double(A),as.double(d),as.integer(n),as.integer(1),PACKAGE="mgcv")
er$vectors;matrix(um[[1]],n,n)
er$values;um[[2]]
*/
{ char jobz='V',uplo='U';
double work1,*work;
int lwork = -1,liwork = -1,iwork1,info,*iwork;
F77_NAME(dsyevd)(&jobz,&uplo,n,A,n,ev,&work1,&lwork,&iwork1,&liwork,&info);
lwork=(int)floor(work1);if (work1-lwork>0.5) lwork++;
work=(double *)calloc((size_t)lwork,sizeof(double));
liwork = iwork1;iwork= (int *)calloc((size_t)liwork,sizeof(int));
F77_NAME(dsyevd)(&jobz,&uplo,n,A,n,ev,work,&lwork,iwork,&liwork,&info);
free(work);free(iwork);
{ char jobz='V',uplo='U',range='A';
double work1,*work,dum1=0,abstol=0.0,*Z,*dum2;
int lwork = -1,liwork = -1,iwork1,info,*iwork,dumi=0,n_eval=0,*isupZ;
if (*use_dsyevd)
{ F77_NAME(dsyevd)(&jobz,&uplo,n,A,n,ev,&work1,&lwork,&iwork1,&liwork,&info);
lwork=(int)floor(work1);if (work1-lwork>0.5) lwork++;
work=(double *)calloc((size_t)lwork,sizeof(double));
liwork = iwork1;iwork= (int *)calloc((size_t)liwork,sizeof(int));
F77_NAME(dsyevd)(&jobz,&uplo,n,A,n,ev,work,&lwork,iwork,&liwork,&info);
free(work);free(iwork);
} else
{ Z=(double *)calloc((size_t)(*n * *n),sizeof(double)); /* eigen-vector storage */
isupZ=(int *)calloc((size_t)(2 * *n),sizeof(int)); /* eigen-vector support */
F77_NAME(dsyevr)(&jobz,&range,&uplo,
n,A,n,&dum1,&dum1,&dumi,&dumi,
&abstol,&n_eval,ev,
Z,n,isupZ, &work1,&lwork,&iwork1,&liwork,&info);
lwork=(int)floor(work1);if (work1-lwork>0.5) lwork++;
work=(double *)calloc((size_t)lwork,sizeof(double));
liwork = iwork1;iwork= (int *)calloc((size_t)liwork,sizeof(int));
F77_NAME(dsyevr)(&jobz,&range,&uplo,
n,A,n,&dum1,&dum1,&dumi,&dumi,
&abstol,&n_eval,ev,
Z,n,isupZ, work,&lwork,iwork,&liwork,&info);
free(work);free(iwork);
dum2 = Z + *n * *n; /* copy vectors back into A */
for (work=Z;work<dum2;work++,A++) *A = *work;
free(Z);free(isupZ);
}
}
/* main method routines */
void mgcv(double *yd,double *Xd,double *Cd,double *wd,double *Sd,double *pd, double *sp,
int *offd,int *dimd,int *md,int *nd,int *qd,int *rd,double *sig2d, double *Vpd,
double *edf, double *conv_tol, int *ms_max_half,double *ddiag,int *idiag,double *sdiag,
int *direct_mesh,double *min_edf,double *gcvubre,double *target_edf,int *fixed_sp,double *hat);
void update_beta(double *X,double *Sr,double *rS,double *theta,double *w,
double *w1, double *z,double *z1,int *Srncol,int *rSncol,
int *m, int *n,int *q, int *get_trA,int *deriv,
double *rank_tol,double *beta, double *trA, double *beta1,
double *trA1,double *rV,int *rank_est);
void magic(double *y,double *X,double *sp,double *def_sp,double *S,double *H,
double *gamma,double *scale, int *control,int *cS,double *rank_tol,
double *tol,double *b,double *rV);
/* various service routines */
void RQT(double *A,int *r,int *c);
void RuniqueCombs(double *X,int *r, int *c);
void RPCLS(double *Xd,double *pd,double *yd, double *wd,double *Aind,double *bd,double *Afd,double *Hd,double *Sd,int *off,int *dim,double *theta, int *m,int *nar);
......@@ -17,9 +28,11 @@ void mgcv_qr(double *x, int *r, int *c,int *pivot,double *tau);
void update_qr(double *Q,double *R,int *n, int *q,double *lam, int *k);
void mgcv_mmult(double *A,double *B,double *C,int *bt,int *ct,int *r,int *c,int *n);
void mgcv_svd_full(double *x,double *vt,double *d,int *r,int *c);
void mgcv_symeig(double *A,double *ev,int *n);
void mgcv_symeig(double *A,double *ev,int *n,int *use_dsyevd);
void mroot(double *A,int *rank,int *n);
/* basis constructor/prediction routines*/
void construct_cr(double *x,int *nx,double *k,int *nk,double *X,double *S,double *C,int *control);
......