Import Upstream version 1.3-17

parent 8c5bfd4e
 Package: mgcv Version: 1.3-16 Version: 1.3-17 Author: Simon Wood Maintainer: Simon Wood Title: GAMs with GCV smoothness estimation and GAMMs by REML/PQL ... ... @@ -12,4 +12,4 @@ Imports: graphics, stats Suggests: nlme (>= 3.1-64), MASS (>= 7.2-2) LazyLoad: yes License: GPL version 2 or later Packaged: Fri Apr 21 12:07:52 2006; simon Packaged: Wed May 17 11:35:14 2006; simon
 ## R routines for gam fitting with calculation of derivatives w.r.t. sp.s ## (c) Simon Wood 2004,2005 ## (c) Simon Wood 2004,2005,2006 gam.fit2 <- function (x, y, sp, S=list(),rS=list(),off, H=NULL, weights = rep(1, nobs), start = NULL, etastart = NULL, ... ... @@ -394,34 +394,44 @@ gam3objective <- function(lsp,args) ret } fix.family.link<-function(fam) # adds d2link the second derivative of the link function w.r.t. mu # to the family supplied. # All d2link functions have been checked numerically. # to the family supplied, as well as a 3rd derivative function # d3link... # All d2link and d3link functions have been checked numerically. { if (!inherits(fam,"family")) stop("fam not a family object") if (!is.null(fam$d2link)) return(fam) link <- fam$link if (length(link)>1) if (fam$family=="quasi") # then it's a power link { lambda <- log(fam$linkfun(exp(1))) if (lambda<=0) fam$d2link <- function(mu) -1/mu^2 else fam$d2link <- function(mu) lambda*(lambda-1)*mu^(lambda-2) { lambda <- log(fam$linkfun(exp(1))) ## the power, if > 0 if (lambda<=0) { fam$d2link <- function(mu) -1/mu^2 fam$d3link <- function(mu) 2/mu^3 } else { fam$d2link <- function(mu) lambda*(lambda-1)*mu^(lambda-2) fam$d3link <- function(mu) (lambda-2)*(lambda-1)*lambda*mu^(lambda-3) } return(fam) } else stop("unrecognized (vector?) link") if (link=="identity") { fam$d2link <- function(mu) rep.int(0,length(mu)) fam$d3link <- fam$d2link <- function(mu) rep.int(0,length(mu)) return(fam) } if (link == "log") { fam$d2link <- function(mu) -1/mu^2 fam$d3link <- function(mu) 2/mu^3 return(fam) } if (link == "inverse") { fam$d2link <- function(mu) 2/mu^3 fam$d3link <- function(mu) {mu <- mu*mu;-6/(mu*mu)} return(fam) } if (link == "logit") { fam$d2link <- function(mu) 1/(1 - mu)^2 - 1/mu^2 fam$d3link <- function(mu) 2/(1 - mu)^3 + 2/mu^3 return(fam) } if (link == "probit") { ... ... 
@@ -429,21 +439,43 @@ fix.family.link<-function(fam) eta <- fam$linkfun(mu) eta/fam$mu.eta(eta)^2 } fam$d3link <- function(mu) { eta <- fam$linkfun(mu) (1 + 2*eta^2)/fam$mu.eta(eta)^3 } return(fam) } if (link == "cloglog") { fam$d2link <- function(mu) { -(1/(1 - mu)^2/log(1 - mu) + 1/(1 - mu) * (1/(1 - mu))/log(1 - mu)^2) fam$d2link <- function(mu) { l1m <- log(1-mu) -1/((1 - mu)^2*l1m) *(1+ 1/l1m) } fam$d3link <- function(mu) { l1m <- log(1-mu) mu3 <- (1-mu)^3 -1/(mu3 * l1m^3) -(1 + 2 * l1m)/ (mu3 * l1m^2) * (1 + 1/l1m) } return(fam) } if (link == "sqrt") { fam$d2link <- function(mu) -.25 * mu^-1.5 fam$d3link <- function(mu) .375 * mu^-2.5 return(fam) } if (link == "cauchit") { fam$d2link <- function(mu) { eta <- fam$linkfun(mu) 2*pi*pi*eta*(1+eta*eta) } fam$d3link <- function(mu) { eta <- fam$linkfun(mu) eta2 <- eta*eta 2*pi*pi*pi*(1+3*eta2)*(1+eta2) } return(fam) } if (link == "1/mu^2") { fam$d2link <- function(mu) 6 * mu^-4 fam$d3link <- function(mu) -24* mu^-5 return(fam) } stop("link not recognised") ... ... @@ -452,24 +484,28 @@ fix.family.link<-function(fam) fix.family.var<-function(fam) # adds dvar the derivative of the variance function w.r.t. mu # to the family supplied. # to the family supplied, as well as d2var the 2nd derivative of # the variance function w.r.t. the mean. (All checked numerically). 
{ if (!inherits(fam,"family")) stop("fam not a family object") if (!is.null(fam$dvar)) return(fam) family <- fam$family if (family=="gaussian") { fam$dvar <- function(mu) rep.int(0,length(mu)) fam$d2var <- fam$dvar <- function(mu) rep.int(0,length(mu)) return(fam) } if (family=="poisson"||family=="quasipoisson") { fam$dvar <- function(mu) rep.int(1,length(mu)) fam$d2var <- function(mu) rep.int(0,length(mu)) return(fam) } if (family=="binomial"||family=="quasibinomial") { fam$dvar <- function(mu) 1-2*mu fam$d2var <- function(mu) rep.int(-2,length(mu)) return(fam) } if (family=="Gamma") { fam$dvar <- function(mu) 2*mu fam$d2var <- function(mu) rep.int(2,length(mu)) return(fam) } if (family=="quasi") { ... ... @@ -481,15 +517,25 @@ fix.family.var<-function(fam) "mu^3" = function(mu) 3*mu^2 ) if (is.null(fam$dvar)) stop("variance function not recognized for quasi") fam$d2var <- switch(fam$varfun, constant = function(mu) rep.int(0,length(mu)), "mu(1-mu)" = function(mu) rep.int(-2,length(mu)), mu = function(mu) rep.int(0,length(mu)), "mu^2" = function(mu) rep.int(2,length(mu)), "mu^3" = function(mu) 6*mu ) return(fam) } if (family=="inverse.gaussian") { fam$dvar <- function(mu) 3*mu^2 fam$d2var <- function(mu) 6*mu return(fam) } stop("family not recognised") } totalPenalty <- function(S,H,off,theta,p) { if (is.null(H)) St <- matrix(0,p,p) else { St <- H; ... ...
This diff is collapsed.
 1.3-17 * print.summary.gam prints estimated ranks more prettily (thanks Martin Maechler) * fix.family.link' can now handle the cauchit' link, and also appends a third derivative of link function to the family (not yet used). * fix.family.var' now adds a second derivative of the variance function to the family (not yet used). * magic' modified to (i) accept an argument extra.rss' which is added to the RSS(squared norm) term in the GCV/UBRE or scale calculation; (ii) accept argument n.score' (defaults to number of data), the number to use in place of the number of data in the GCV/UBRE calculation. These are useful for dealing with very large data sets using pseudo-model approaches. * trans' and shift' arguments added to plot.gam': allows, e.g. single smooth models to be easily plotted on uncentred response scale. * Some .Rd bug fixes. * Addition of choose.k.Rd helpfile, including example code for diagnosing overly restrictive choice of smoothing basis dimension k'. 1.3-16 * bug fix in predict.gam documentation + example of how to predict from a ... ... @@ -5,9 +31,9 @@ 1.3-15 * chol(A,pivot=TRUE) now (R 2.3.0) generates a warning if A' is not +ve definite. mroot' modified to suppress this (since it only calls chol(A,pivot=TRUE)' because A' is usually +ve semi-definite). * chol(A,pivot=TRUE) now (R 2.3.0) generates a warning if A' is not +ve definite. mroot' modified to suppress this (since it only calls chol(A,pivot=TRUE)' because A' is usually +ve semi-definite). 1.3-14 ... ...
man/choose.k.Rd 0 → 100755
 \name{choose.k} \alias{choose.k} %- Also NEED an \alias' for EACH other topic documented here. \title{Basis dimension choice for smooths} \description{Choosing the basis dimension, and checking the choice, when using penalized regression smoothers. Penalized regression smoothers gain computational efficiency by virtue of being defined using a basis of relatively modest size, \code{k}. When setting up models in the \code{mgcv} package, using \code{\link{s}} or \code{\link{te}} terms in a model formula, \code{k} must be chosen. In practice \code{k-1} sets the upper limit on the degrees of freedom associated with an \code{\link{s}} smooth (1 degree of freedom is lost to the identifiability constraint on the smooth). For \code{\link{te}} smooths the upper limit of the degrees of freedom is given by the product of the \code{k} values provided for each marginal smooth less one, for the constraint. However the actual effective degrees of freedom are controlled by the degree of penalization selected during fitting, by GCV, AIC or whatever is specified. The exception to this is if a smooth is specified using the \code{fx=TRUE} option, in which case it is unpenalized. So, exact choice of \code{k} is not generally critical: it should be chosen to be large enough that you are reasonably sure of having enough degrees of freedom to represent the underlying truth' reasonably well, but small enough to maintain reasonable computational efficiency. Clearly large' and small' are dependent on the particular problem being addressed. As with all model assumptions, it is useful to be able to check the choice of \code{k} informally. If the effective degrees of freedom for a model term are estimated to be much less than \code{k-1} then this is unlikely to be very worthwhile, but as the EDF approach \code{k-1}, checking can be important. 
 A useful general purpose approach goes as follows: (i) fit your model and extract the deviance residuals; (ii) for each smooth term in your model, fit an equivalent, single, smooth to the residuals, using a substantially increased \code{k} to see if there is pattern in the residuals that could potentially be explained by increasing \code{k}. Examples are provided below. More sophisticated approaches based on partial residuals are also possible. One scenario that can cause confusion is this: a model is fitted with \code{k=10} for a smooth term, and the EDF for the term is estimated as 7.6, some way below the maximum of 9. The model is then refitted with \code{k=20} and the EDF increases to 8.7 - what is happening - how come the EDF was not 8.7 the first time around? The explanation is that the function space with \code{k=20} contains a larger subspace of functions with EDF 8.7 than did the function space with \code{k=10}: one of the functions in this larger subspace fits the data a little better than did any function in the smaller subspace. These subtleties seldom have much impact on the statistical conclusions to be drawn from a model fit, however. } \author{ Simon N. Wood \email{simon.wood@r-project.org}} \references{ Wood, S.N. (2006) Generalized Additive Models: An Introduction with R. CRC. \url{http://www.maths.bath.ac.uk/~sw283/} } \examples{ ## Simulate some data .... library(mgcv) set.seed(0) n<-400;sig<-2 x0 <- runif(n, 0, 1);x1 <- runif(n, 0, 1) x2 <- runif(n, 0, 1);x3 <- runif(n, 0, 1) f <- 2 * sin(pi * x0) f <- f + exp(2 * x1) - 3.75887 f <- f+0.2*x2^11*(10*(1-x2))^6+10*(10*x2)^3*(1-x2)^10-1.396 e <- rnorm(n, 0, sig) y <- f + e ## fit a GAM with quite low k' b<-gam(y~s(x0,k=6)+s(x1,k=6)+s(x2,k=6)+s(x3,k=6)) plot(b,pages=1) ## check for residual pattern, removable by increasing k' ## typically k', below, should be substantially larger than ## the original k', but certainly less than n/2. 
## Note use of cheap "cs" shrinkage smoothers, and gamma=1.4 ## to reduce chance of overfitting... rsd <- residuals(b) gam(rsd~s(x0,k=40,bs="cs"),gamma=1.4) ## fine gam(rsd~s(x1,k=40,bs="cs"),gamma=1.4) ## fine gam(rsd~s(x2,k=40,bs="cs"),gamma=1.4) ## original k' too low gam(rsd~s(x3,k=40,bs="cs"),gamma=1.4) ## fine ## similar example with multi-dimensional smooth b1 <- gam(y~s(x0)+s(x1,x2,k=15)+s(x3)) rsd <- residuals(b1) gam(rsd~s(x0,k=40,bs="cs"),gamma=1.4) ## fine gam(rsd~s(x1,x2,k=100,bs="ts"),gamma=1.4) ## original k' too low gam(rsd~s(x3,k=40,bs="cs"),gamma=1.4) ## fine ## and a te' example b2 <- gam(y~s(x0)+te(x1,x2,k=4)+s(x3)) rsd <- residuals(b2) gam(rsd~s(x0,k=40,bs="cs"),gamma=1.4) ## fine gam(rsd~te(x1,x2,k=10,bs="cs"),gamma=1.4) ## original k' too low gam(rsd~s(x3,k=40,bs="cs"),gamma=1.4) ## fine ## same approach works with other families in the original model g<-exp(f/4) y<-rpois(rep(1,n),g) bp<-gam(y~s(x0,k=6)+s(x1,k=6)+s(x2,k=6)+s(x3,k=6),family=poisson) rsd <- residuals(bp) gam(rsd~s(x0,k=40,bs="cs"),gamma=1.4) ## fine gam(rsd~s(x1,k=40,bs="cs"),gamma=1.4) ## fine gam(rsd~s(x2,k=40,bs="cs"),gamma=1.4) ## original k' too low gam(rsd~s(x3,k=40,bs="cs"),gamma=1.4) ## fine } \keyword{models} \keyword{regression}%-- one or more ..
 ... ... @@ -82,7 +82,7 @@ Wood, S.N. (2003) Thin plate regression splines. J.R.Statist.Soc.B 65(1):95-114 \author{ Simon N. Wood \email{simon.wood@r-project.org}} \seealso{ \code{\link{gam}}, \code{\link{mgcv}}, \code{\link{magic}}} \seealso{ \code{\link{choose.k}}, \code{\link{gam}}, \code{\link{mgcv}}, \code{\link{magic}}} \examples{ library(mgcv) ... ...
 ... ... @@ -41,7 +41,8 @@ basis sets an upper limit on the maximum possible degrees of freedom for the basis - the limit is typically one less than basis dimension). Full details of how to control smooths are given in \code{\link{s}} and \code{\link{te}}. For the moment suppose that we would like to change \code{\link{s}} and \code{\link{te}}, and further discussion of basis dimension choice can be found in \code{\link{choose.k}}. For the moment suppose that we would like to change the basis of the first smooth to a cubic regression spline basis with a dimension of 20, while fixing the second term at 25 degrees of freedom. The appropriate formula would be:\cr ... ...
  ... ... @@ -16,10 +16,10 @@ value decomposition approach. } %- end description \usage{ magic(y,X,sp,S,off,rank=NULL,H=NULL,C=NULL,w=NULL, gamma=1,scale=1,gcv=TRUE,ridge.parameter=NULL, magic(y,X,sp,S,off,rank=NULL,H=NULL,C=NULL,w=NULL,gamma=1, scale=1,gcv=TRUE,ridge.parameter=NULL, control=list(maxit=50,tol=1e-6,step.half=25, rank.tol=.Machine$double.eps^0.5)) rank.tol=.Machine$double.eps^0.5),extra.rss=0,n.score=length(y)) } %- maybe also usage' for other objects documented here. \arguments{ ... ... @@ -84,7 +84,19 @@ parameter value.} Basically any singular value less than \code{rank_tol} multiplied by the largest singular value of the problem is set to zero.} } %- end of control \item{extra.rss}{is a constant to be added to the residual sum of squares (squared norm) term in the calculation of the GCV, UBRE and scale parameter estimate. In conjunction with \code{n.score}, this is useful for certain methods for dealing with very large data sets.} \item{n.score}{number to use as the number of data in GCV/UBRE score calculation: usually the actual number of data, but there are methods for dealing with very large datasets that change this.} } \details{ ... ...