Commit 5b7bea96 authored by Dirk Eddelbuettel's avatar Dirk Eddelbuettel

Import Upstream version 1.7-27

parent 8e9a69da
** denotes quite substantial/important changes
*** denotes really big changes
1.7-27
* Further multi-threading in gam fits - final two leading order matrix
operations parallelized using openMP.
* Export of smooth.construct.t2.smooth.spec and Predict.matrix.t2.smooth,
and Rrank.
* Fix of missing [,,drop=FALSE] in predict.gam that could cause problems
with single row prediction when 'terms' supplied (thanks Yang Yang).
1.7-26
* Namespace fixes.
......
Package: mgcv
Version: 1.7-26
Version: 1.7-27
Author: Simon Wood <simon.wood@r-project.org>
Maintainer: Simon Wood <simon.wood@r-project.org>
Title: Mixed GAM Computation Vehicle with GCV/AIC/REML smoothness
......@@ -14,7 +14,7 @@ Suggests: splines, parallel
LazyLoad: yes
ByteCompile: yes
License: GPL (>= 2)
Packaged: 2013-09-06 09:30:03 UTC; ripley
Packaged: 2013-09-30 20:38:14 UTC; sw283
NeedsCompilation: yes
Repository: CRAN
Date/Publication: 2013-09-06 11:31:13
Date/Publication: 2013-10-01 09:49:00
d364a4f120922c8c4bd91fd70611687b *ChangeLog
82baebca68c3f9b9b0615e9b7daeca0b *DESCRIPTION
1174ecab552f0e905f3d1bf460abc073 *ChangeLog
7e3c4dfbb9e547587aba9c97f5bb5c54 *DESCRIPTION
eb723b61539feef013de476e68b5c50a *GPL-2
4466d00a9e94717f775d55c8fead7f91 *NAMESPACE
c481dc91d92fe1366ba65dac94c4dd97 *NAMESPACE
5b5d847f492e7c5a687bafe83f241a8c *R/bam.r
6f5c9798cb6e42b3431febdd3149a26e *R/fast-REML.r
b65bae33a48ef86e7459192fb74cbee4 *R/gam.fit3.r
38df74295474deb8d94f007d15931bfc *R/fast-REML.r
3aa1f87a3ee196f94610febde5cb2f4f *R/gam.fit3.r
902657a0ee2dedc3fdfa501bf3b37c5b *R/gam.sim.r
d74688ecfa7912cd40f5373154da27d2 *R/gamm.r
dfda3f652d3d245a26a9109f480bbe4e *R/mgcv.r
2c8567dee7cc29736e260a9518361c6a *R/misc.r
2ef4d06f28ea2157372507107970ed82 *R/gamm.r
7431eef037b2dddfb853cd88b53c0bc4 *R/mgcv.r
cc6cc65db0fb325267edc9e861a6dd92 *R/misc.r
f8b84b98c0524c2664885294650af3b8 *R/plots.r
b0b6e71c15742608f2dec02c1d8ae71b *R/smooth.r
90f72ef91dfc2a25c262fc002f781bf7 *R/soap.r
......@@ -17,11 +17,12 @@ e468195a83fab90da8e760c2c3884bd3 *data/columb.polys.rda
40874e3ced720a596750f499ded8a60a *data/columb.rda
9388a0979dab4fe1988b777ae4dcfb0a *inst/CITATION
5bf12ddc0dab9daae72271b96a15c539 *man/Predict.matrix.Rd
c51c9b8c9c73f81895176ded39b91394 *man/Predict.matrix.cr.smooth.Rd
c45c0f78f753461b33a295883461e732 *man/Predict.matrix.cr.smooth.Rd
e9b0a2e31b130cf2cb38618ec50d1919 *man/Predict.matrix.soap.film.Rd
f12468625253dd3603907de233762fd6 *man/Rrank.Rd
3e5e30b44947d3ddf00fcd55be219038 *man/Tweedie.Rd
27b7ae82bab920543222c4428d33cee1 *man/anova.gam.Rd
7408512e0b41ba735bc79fce357282ad *man/bam.Rd
e7301fab74ef54c73a122b397fc101d0 *man/bam.Rd
71bde8b8caa24a36529ce7e0ac3165d8 *man/bam.update.Rd
bb5ec26743d46d7ce1dbf128bceab35a *man/cSplineDes.Rd
9b4d616d1b6c4a46ca77d16cded3f806 *man/choose.k.Rd
......@@ -49,7 +50,7 @@ b2ff252537dd2155524b774b2435e66e *man/gam.vcomp.Rd
a66a814cc4c6f806e824751fda519ae0 *man/gam2objective.Rd
eacda367e277e56bc4cc58153dfa5318 *man/gamObject.Rd
0ac5fb78c9db628ce554a8f68588058c *man/gamSim.Rd
9c301060b3c35bc6ab83e6f38ce8d3b6 *man/gamm.Rd
76276a29a43519f2be3bbb34cbdc385e *man/gamm.Rd
39ae109127110152e97dc9f79e08bb14 *man/get.var.Rd
a2ea1233d43fac89e0cacbc09a8d31e2 *man/in.out.Rd
9c461959be1272edcb98ee7e20fdc317 *man/inSide.Rd
......@@ -68,7 +69,7 @@ aba56a0341ba9526a302e39d33aa9042 *man/interpret.gam.Rd
00ccf213c31910cd14f1df65a300eb33 *man/model.matrix.gam.Rd
bc9b89db7e7ff246749551c16f5f1f07 *man/mono.con.Rd
d33914a328f645af13f5a42914ca0f35 *man/mroot.Rd
bedf887cfc36712ff30e3129720dea96 *man/negbin.Rd
7931452d2f9534c49cfd6e8aa4c3d62f *man/negbin.Rd
8b766a6ad848b0f1ca469e381ded0169 *man/new.name.Rd
7d8f62e182f7c428cc9d46ddd4d97d43 *man/notExp.Rd
445571cf0b115bf19a2d3474e70f59e7 *man/notExp2.Rd
......@@ -98,8 +99,9 @@ db75c958cbfb561914a3291ab58b9786 *man/smooth.construct.fs.smooth.spec.Rd
772e6c18d25cfaf3d9c194031ee042fe *man/smooth.construct.mrf.smooth.spec.Rd
abe15377f471a2d8957a59c19eeef0bb *man/smooth.construct.ps.smooth.spec.Rd
d202c6718fb1138fdd99e6102250aedf *man/smooth.construct.re.smooth.spec.Rd
53fc438bb9c623715ff323e60ca2c367 *man/smooth.construct.so.smooth.spec.Rd
a8d5eb4772641567fee15714f01a4727 *man/smooth.construct.so.smooth.spec.Rd
ce311e544603f1089562747652abce20 *man/smooth.construct.sos.smooth.spec.Rd
3cb4e59f915c8d64b90754eaeeb5a86f *man/smooth.construct.t2.smooth.spec.Rd
8672633a1fad8df3cb1f53d7fa883620 *man/smooth.construct.tensor.smooth.spec.Rd
574641abfbf14339fbb01948235a2a19 *man/smooth.construct.tp.smooth.spec.Rd
75a042d74460dd0c6e0c0a95c1f5d3f7 *man/smooth.terms.Rd
......@@ -124,20 +126,20 @@ d5a0f198090ecbfedaa6549f2918b997 *po/R-po.po
1a4a267ddcb87bb83f09c291d3e97523 *po/fr.po
813514ea4e046ecb4563eb3ae8aa202a *po/mgcv.pot
749ac663240fd6eaa2b725602d47ef2a *po/po.po
c49d921d97e78dcd26088ec6326e6940 *src/Makevars
29b51c84b16b1692061d363065beac3b *src/gdi.c
5d6b6d2ce66b7d3a78ebd10dc0c8da91 *src/Makevars
f278ee892368a664c1c4bfefad77eff4 *src/gdi.c
49af97195accb65adc75620183d39a4c *src/general.h
2a9d03aeee4de9228502620734b6c84c *src/init.c
e12a89a18a1aa847fe958b02f9dc9f80 *src/magic.c
0ed50431782a747557935ad347e4e64d *src/mat.c
f08642ce338f08fb00f829fc01868216 *src/init.c
6dbd109048a3bd18b67db6230c064c21 *src/magic.c
7a3b3d7d2504574094c59ea70912f125 *src/mat.c
6c9101f596440a09ecc3966a2374efd2 *src/matrix.c
0f8448f67d16668f9027084a2d9a1b52 *src/matrix.h
96e43aa805215a0463fd13fd4b3d41f8 *src/mgcv.c
94660efe79de3659f210b023819afed0 *src/mgcv.h
568d1195622cd01aef2f614140b94be0 *src/mgcv.h
c6b8c3093e2970654237c81ccfab1473 *src/misc.c
97bc53816f97fc68a37b2e0716a6dc0a *src/qp.c
cd563899be5b09897d1bf36a7889caa0 *src/qp.h
15ed450da804a9dbd0833459af1f2474 *src/soap.c
449046a076c579cd0fafc137d250e65e *src/sparse-smooth.c
296af5dc0f71ab0199d462c81937ae37 *src/sparse-smooth.c
42f015e636b5fbd2a3a3bb46cbe24eaa *src/tprs.c
5352d5d2298acd9b03ee1895933d4fb4 *src/tprs.h
......@@ -33,8 +33,10 @@ export(anova.gam, bam, bam.update, concurvity, cSplineDes,
Predict.matrix.mrf.smooth,
Predict.matrix.pspline.smooth,
Predict.matrix.random.effect,
Predict.matrix.t2.smooth,
qq.gam,
residuals.gam,rig,rTweedie, s,
residuals.gam,rig,rTweedie,
Rrank,s,
slanczos,
smoothCon,smooth.construct,smooth.construct2,
smooth.construct.cc.smooth.spec,
......@@ -53,6 +55,7 @@ export(anova.gam, bam, bam.update, concurvity, cSplineDes,
smooth.construct.sf.smooth.spec,
smooth.construct.sw.smooth.spec,
smooth.construct.ad.smooth.spec,
smooth.construct.t2.smooth.spec,
summary.gam,sp.vcov,
spasm.construct,spasm.sp,spasm.smooth,
t2,te,ti,tensor.prod.model.matrix,tensor.prod.penalties,
......@@ -70,8 +73,8 @@ importFrom(graphics,axis,box,contour,hist,lines,mtext, par, persp,plot,points,
# var,vcov)
importFrom(stats,anova,influence,cooks.distance,logLik,vcov,residuals,predict,model.matrix)
#importFrom(nlme,Dim,corMatrix,logDet,pdConstruct,pdFactor,pdMatrix,getGroupsFormula,lme,varFixed,lmeControl)
import(nlme)
importFrom(nlme,Dim,corMatrix,logDet,pdConstruct,pdFactor,pdMatrix,getGroupsFormula,lme,varFixed,lmeControl)
#import(nlme)
importMethodsFrom(Matrix,t,colMeans,colSums,chol,solve,lu,expand)
importFrom(Matrix,Diagonal,sparseMatrix,Matrix)
#import(Matrix)
......
......@@ -230,7 +230,7 @@ Sl.initial.repara <- function(Sl,X,inverse=FALSE) {
} ## end Sl.initial.repra
ldetS <- function(Sl,rho,fixed,np,root=FALSE) {
## Get log generalized derivative of S stored blockwise in an Sl list.
## Get log generalized determinant of S stored blockwise in an Sl list.
## Any multi-term blocks will be re-parameterized using gam.reparam, and
## a re-parameterization object supplied in the returned object.
## Returns: Sl, with modified rS terms, if needed and rho added to each block
......
......@@ -39,7 +39,7 @@ gam.reparam <- function(rS,lsp,deriv)
d.tol = as.double(d.tol),
r.tol = as.double(r.tol),
fixed_penalty = as.integer(fixed.penalty))
S <- matrix(oo$S,q,q)
S <- matrix(oo$S,q,q)
S <- (S+t(S))*.5
p <- abs(diag(S))^.5 ## by Choleski, p can not be zero if S +ve def
p[p==0] <- 1 ## but it's possible to make a mistake!!
......@@ -160,10 +160,13 @@ gam.fit3 <- function (x, y, sp, Eb,UrS=list(),
T[1:ncol(rp$Qs),1:ncol(rp$Qs)] <- rp$Qs
T <- U1%*%T ## new params b'=T'b old params
null.coef <- t(T)%*%null.coef
x <- x%*%T ## model matrix
null.coef <- t(T)%*%null.coef
## form x%*%T in parallel
x <- .Call(C_mgcv_pmmult2,x,T,0,0,control$nthreads)
## x <- x%*%T ## model matrix 0(nq^2)
rS <- list()
for (i in 1:length(UrS)) {
rS[[i]] <- rbind(rp$rS[[i]],matrix(0,Mp,ncol(rp$rS[[i]])))
......@@ -406,7 +409,7 @@ gam.fit3 <- function (x, y, sp, Eb,UrS=list(),
}
while (pdev -old.pdev > div.thresh)
{ ## step halve until pdev <= old.pdev
if (ii > 200)
if (ii > 100)
stop("inner loop 3; can't correct step size")
ii <- ii + 1
start <- (start + coefold)/2
......@@ -725,8 +728,10 @@ gam.fit3.post.proc <- function(X,object) {
## get edf array and covariance matrices after a gam fit.
## X is original model matrix
Vb <- object$rV%*%t(object$rV)*object$scale ## Bayesian cov.
PKt <- object$rV%*%t(object$K)
F <- PKt%*%(sqrt(object$weights)*X)
# PKt <- object$rV%*%t(object$K)
PKt <- .Call(C_mgcv_pmmult2,object$rV,object$K,0,1,object$control$nthreads)
# F <- PKt%*%(sqrt(object$weights)*X)
F <- .Call(C_mgcv_pmmult2,PKt,sqrt(object$weights)*X,0,0,object$control$nthreads)
edf <- diag(F) ## effective degrees of freedom
edf1 <- 2*edf - rowSums(t(F)*F) ## alternative
## edf <- rowSums(PKt*t(sqrt(object$weights)*X))
......@@ -736,8 +741,10 @@ gam.fit3.post.proc <- function(X,object) {
## get QR factor R of WX - more efficient to do this
## in gdi_1 really, but that means making QR of augmented
## a two stage thing, so not clear cut...
qrx <- qr(sqrt(object$weights)*X,LAPACK=TRUE)
R <- qr.R(qrx);R[,qrx$pivot] <- R
#qrx <- qr(sqrt(object$weights)*X,LAPACK=TRUE,tol=0)
#R <- qr.R(qrx);R[,qrx$pivot] <- R
qrx <- pqr(sqrt(object$weights)*X,object$control$nthreads)
R <- pqr.R(qrx);R[,qrx$pivot] <- R
##bias = as.numeric(object$coefficients - F%*%object$coefficients)
list(Vb=Vb,Ve=Ve,edf=edf,edf1=edf1,hat=hat,F=F,R=R)
}
......
......@@ -1363,7 +1363,7 @@ gamm <- function(formula,random=NULL,correlation=NULL,family=gaussian(),data=lis
# between the basis constructors provided in mgcv and the gammPQL routine used to estimate the model.
# NOTE: need to fill out the gam object properly
{
##if (!require("nlme")) stop("gamm() requires package nlme to be installed")
## if (!require("nlme")) stop("gamm() requires package nlme to be installed")
control <- do.call("lmeControl",control)
# check that random is a named list
if (!is.null(random))
......
## R routines for the package mgcv (c) Simon Wood 2000-2011
## R routines for the package mgcv (c) Simon Wood 2000-2013
## With contributions from Henric Nilsson
Rrank <- function(R,tol=.Machine$double.eps^.9) {
......@@ -1231,6 +1231,7 @@ gam.outer <- function(lsp,fscale,family,control,method,optimizer,criterion,scale
object$scale <- object$scale.est;object$scale.estimated <- TRUE
}
object$control <- control
mv <- gam.fit3.post.proc(G$X,object)
object$Vp <- mv$Vb
object$hat<-mv$hat
......@@ -1248,7 +1249,7 @@ gam.outer <- function(lsp,fscale,family,control,method,optimizer,criterion,scale
object$sig2 <- object$scale
object
}
} ## gam.outer
get.null.coef <- function(G,start=NULL,etastart=NULL,mustart=NULL,...) {
## Get an estimate of the coefs corresponding to maximum reasonable deviance...
......@@ -2159,22 +2160,22 @@ predict.gam <- function(object,newdata,type="link",se.fit=FALSE,terms=NULL,
names(newdata)->nn # new data names
colnames(object$model)->mn # original names
for (i in 1:length(newdata))
if (nn[i]%in%mn && is.factor(object$model[,nn[i]])) # then so should newdata[[i]] be
{ newdata[[i]]<-factor(newdata[[i]],levels=levels(object$model[,nn[i]])) # set prediction levels to fit levels
if (nn[i]%in%mn && is.factor(object$model[,nn[i]])) { # then so should newdata[[i]] be
newdata[[i]]<-factor(newdata[[i]],levels=levels(object$model[,nn[i]])) # set prediction levels to fit levels
}
# split prediction into blocks, to avoid running out of memory
if (length(newdata)==1) newdata[[2]]<-newdata[[1]] # avoids data frame losing its labels and dimensions below!
if (is.null(dim(newdata[[1]]))) np<-length(newdata[[1]])
else np<-dim(newdata[[1]])[1]
nb<-length(object$coefficients)
if (block.size<1) block.size <- np
n.blocks<-np%/%block.size
b.size<-rep(block.size,n.blocks)
last.block<-np-sum(b.size)
if (length(newdata)==1) newdata[[2]] <- newdata[[1]] # avoids data frame losing its labels and dimensions below!
if (is.null(dim(newdata[[1]]))) np <- length(newdata[[1]])
else np <- dim(newdata[[1]])[1]
nb <- length(object$coefficients)
if (block.size < 1) block.size <- np
n.blocks <- np %/% block.size
b.size <- rep(block.size,n.blocks)
last.block <- np-sum(b.size)
if (last.block>0)
{ n.blocks<-n.blocks+1
b.size[n.blocks]<-last.block
{ n.blocks <- n.blocks+1
b.size[n.blocks] <- last.block
}
} else # no new data, just use object$model
{ np <- nrow(object$model)
......@@ -2184,52 +2185,51 @@ predict.gam <- function(object,newdata,type="link",se.fit=FALSE,terms=NULL,
}
# setup prediction arrays
n.smooth<-length(object$smooth)
if (type=="lpmatrix")
{ H <- matrix(0,np,nb)
} else
if (type=="terms"||type=="iterms")
{ term.labels<-attr(object$pterms,"term.labels")
if (type=="lpmatrix") {
H <- matrix(0,np,nb)
} else if (type=="terms"||type=="iterms") {
term.labels <- attr(object$pterms,"term.labels")
if (is.null(attr(object,"para.only"))) para.only <-FALSE else
para.only <- TRUE # if true then only return information on parametric part
n.pterms <- length(term.labels)
fit <- array(0,c(np,n.pterms+as.numeric(!para.only)*n.smooth))
if (se.fit) se <- fit
ColNames <- term.labels
} else
{ fit <- array(0,np)
} else {
fit <- array(0,np)
if (se.fit) se <- fit
}
stop<-0
stop <- 0
Terms <- delete.response(object$pterms)
s.offset <- NULL # to accumulate any smooth term specific offset
any.soff <- FALSE # indicator of term specific offset existence
if (n.blocks>0) for (b in 1:n.blocks) # work through prediction blocks
{ start<-stop+1
stop<-start+b.size[b]-1
if (n.blocks==1) data <- newdata else data<-newdata[start:stop,]
if (n.blocks > 0) for (b in 1:n.blocks) { # work through prediction blocks
start <- stop+1
stop <- start + b.size[b] - 1
if (n.blocks==1) data <- newdata else data <- newdata[start:stop,]
X <- matrix(0,b.size[b],nb)
Xoff <- matrix(0,b.size[b],n.smooth) ## term specific offsets
## implements safe prediction for parametric part as described in
## http://developer.r-project.org/model-fitting-functions.txt
if (new.data.ok)
{ if (nd.is.mf) mf <- model.frame(data,xlev=object$xlevels) else
if (new.data.ok) {
if (nd.is.mf) mf <- model.frame(data,xlev=object$xlevels) else
{ mf <- model.frame(Terms,data,xlev=object$xlevels)
if (!is.null(cl <- attr(object$pterms,"dataClasses"))) .checkMFClasses(cl,mf)
}
Xp <- model.matrix(Terms,mf,contrasts=object$contrasts)
} else
{ Xp <- model.matrix(Terms,object$model)
} else {
Xp <- model.matrix(Terms,object$model)
mf <- newdata # needed in case of offset, below
}
if (object$nsdf) X[,1:object$nsdf]<-Xp
if (n.smooth) for (k in 1:n.smooth)
{ Xfrag <- PredictMat(object$smooth[[k]],data)
if (object$nsdf) X[,1:object$nsdf] <- Xp
if (n.smooth) for (k in 1:n.smooth) {
Xfrag <- PredictMat(object$smooth[[k]],data)
X[,object$smooth[[k]]$first.para:object$smooth[[k]]$last.para] <- Xfrag
Xfrag.off <- attr(Xfrag,"offset") ## any term specific offsets?
if (!is.null(Xfrag.off)) { Xoff[,k] <- Xfrag.off; any.soff <- TRUE }
if (type=="terms"||type=="iterms") ColNames[n.pterms+k]<-object$smooth[[k]]$label
if (type=="terms"||type=="iterms") ColNames[n.pterms+k] <- object$smooth[[k]]$label
}
if (!is.null(object$Xcentre)) { ## Apply any column centering
......@@ -2238,7 +2238,7 @@ predict.gam <- function(object,newdata,type="link",se.fit=FALSE,terms=NULL,
# have prediction matrix for this block, now do something with it
if (type=="lpmatrix") {
H[start:stop,]<-X
H[start:stop,] <- X
if (any.soff) s.offset <- rbind(s.offset,Xoff)
} else
if (type=="terms"||type=="iterms")
......@@ -2248,14 +2248,14 @@ predict.gam <- function(object,newdata,type="link",se.fit=FALSE,terms=NULL,
for (i in 1:n.pterms)
{ ii <- ind[object$assign==i]
fit[start:stop,i] <- X[,ii,drop=FALSE]%*%object$coefficients[ii]
if (se.fit) se[start:stop,i]<-
if (se.fit) se[start:stop,i] <-
sqrt(rowSums((X[,ii,drop=FALSE]%*%object$Vp[ii,ii])*X[,ii,drop=FALSE]))
}
if (n.smooth&&!para.only)
{ for (k in 1:n.smooth) # work through the smooth terms
{ first<-object$smooth[[k]]$first.para;last<-object$smooth[[k]]$last.para
fit[start:stop,n.pterms+k]<-X[,first:last,drop=FALSE]%*%object$coefficients[first:last] + Xoff[,k]
{ first <- object$smooth[[k]]$first.para; last <- object$smooth[[k]]$last.para
fit[start:stop,n.pterms+k] <- X[,first:last,drop=FALSE] %*% object$coefficients[first:last] + Xoff[,k]
if (se.fit) { # diag(Z%*%V%*%t(Z))^0.5; Z=X[,first:last]; V is sub-matrix of Vp
if (type=="iterms"&& attr(object$smooth[[k]],"nCons")>0) { ## termwise se to "carry the intercept"
X1 <- matrix(object$cmX,nrow(X),ncol(X),byrow=TRUE)
......@@ -2276,24 +2276,30 @@ predict.gam <- function(object,newdata,type="link",se.fit=FALSE,terms=NULL,
# retain only terms of order 1 - this is to make termplot work
order <- attr(object$pterms,"order")
term.labels <- term.labels[order==1]
fit <- as.matrix(as.matrix(fit)[,order==1])
## fit <- as.matrix(as.matrix(fit)[,order==1])
fit <- fit[,order==1,drop=FALSE]
colnames(fit) <- term.labels
if (se.fit) { se <- as.matrix(as.matrix(se)[,order==1])
if (se.fit) { ## se <- as.matrix(as.matrix(se)[,order==1])
se <- se[,order==1,drop=FALSE]
colnames(se) <- term.labels }
}
if (!is.null(terms)) # return only terms requested via `terms'
{ if (sum(!(terms %in%colnames(fit))))
warning("non-existent terms requested - ignoring")
else { names(term.labels) <- term.labels
term.labels <- term.labels[terms] # names lost if only one col
fit <- as.matrix(as.matrix(fit)[,terms])
colnames(fit) <- term.labels
if (se.fit) {se <- as.matrix(as.matrix(se)[,terms])
colnames(se) <- term.labels}
if (!is.null(terms)) { # return only terms requested via `terms'
if (sum(!(terms %in%colnames(fit))))
warning("non-existent terms requested - ignoring")
else {
#names(term.labels) <- term.labels
#term.labels <- term.labels[terms] # names lost if only one col
##fit <- as.matrix(as.matrix(fit)[,terms])
fit <- fit[,terms,drop=FALSE]
#colnames(fit) <- term.labels
if (se.fit) {## se <- as.matrix(as.matrix(se)[,terms])
se <- se[,terms,drop=FALSE]
#colnames(se) <- term.labels
}
}
}
} else # "link" or "response"
{ k<-attr(attr(object$model,"terms"),"offset")
} else { # "link" or "response"
k<-attr(attr(object$model,"terms"),"offset")
fit[start:stop]<-X%*%object$coefficients + rowSums(Xoff)
if (!is.null(k)) fit[start:stop]<-fit[start:stop]+model.offset(mf) + rowSums(Xoff)
if (se.fit) se[start:stop]<-sqrt(rowSums((X%*%object$Vp)*X))
......
......@@ -3,6 +3,25 @@
## for testing purposes
pqr2 <- function(x,nt=1) {
## Function for parallel pivoted qr decomposition of a matrix using LAPACK
## householder routines...
## x: numeric matrix to decompose; nt: number of threads for the C routine.
## Returns an object of class "qr" structured like the result of
## qr(x,LAPACK=TRUE), so that qr.R, qr.qy etc. can be applied to it.
## Timing/testing comparison against base qr and pqr:
## library(mgcv); n <- 10000;p<-1000;x <- matrix(runif(n*p),n,p)
## system.time(qrx <- qr(x,LAPACK=TRUE))
## system.time(qrx2 <- mgcv:::pqr2(x,2))
## system.time(qrx3 <- mgcv:::pqr(x,2))
## range(qrx2$qr-qrx$qr)
p <- ncol(x)
beta <- rep(0.0,p) ## receives the householder scale factors ("qraux")
piv <- as.integer(rep(0,p)) ## receives the pivot sequence (0-based from C)
xc <- x*1 ## force a copy: xc is presumably overwritten in place by the C
          ## routine with the compact QR factorization -- hence not passing x
rank <- .Call(C_mgcv_Rpiqr,xc,beta,piv,nt) ## fills xc, beta, piv; returns rank
ret <- list(qr=xc,rank=rank,qraux=beta,pivot=piv+1) ## piv+1: 1-based R indexing
attr(ret,"useLAPACK") <- TRUE ## so qr.* methods treat this as LAPACK style
class(ret) <- "qr"
ret
}
block.reorder <- function(x,n.blocks=1,reverse=FALSE) {
## takes a matrix x divides it into n.blocks row-wise blocks, and re-orders
## so that the blocks are stored one after the other.
......@@ -25,7 +44,7 @@ pqr <- function(x,nt=1) {
x.c <- ncol(x);r <- nrow(x)
oo <- .C(C_mgcv_pqr,x=as.double(c(x,rep(0,nt*x.c^2))),as.integer(r),as.integer(x.c),
pivot=as.integer(rep(0,x.c)), tau=as.double(rep(0,(nt+1)*x.c)),as.integer(nt))
list(x=oo$x,r=r,c=x.c,tau=oo$tau,pivot=oo$pivot,nt=nt)
list(x=oo$x,r=r,c=x.c,tau=oo$tau,pivot=oo$pivot+1,nt=nt)
}
pqr.R <- function(x) {
......
......@@ -6,6 +6,7 @@
\alias{Predict.matrix.tensor.smooth}
\alias{Predict.matrix.tprs.smooth}
\alias{Predict.matrix.ts.smooth}
\alias{Predict.matrix.t2.smooth}
%- Also NEED an `\alias' for EACH other topic documented here.
\title{Predict matrix method functions}
\description{The various built-in smooth classes for use with \code{\link{gam}} have associated \code{\link{Predict.matrix}}
......@@ -20,6 +21,7 @@ method functions to enable prediction from the fitted model. }
\method{Predict.matrix}{tensor.smooth}(object, data)
\method{Predict.matrix}{tprs.smooth}(object, data)
\method{Predict.matrix}{ts.smooth}(object, data)
\method{Predict.matrix}{t2.smooth}(object, data)
}
\arguments{
......
\name{Rrank}
\alias{Rrank}
%- Also NEED an `\alias' for EACH other topic documented here.
\title{Find rank of upper triangular matrix}
\description{
Finds rank of upper triangular matrix R, by estimating condition
number of upper \code{rank} by \code{rank} block, and reducing \code{rank}
until this is acceptably low. Assumes R has been computed by a method that uses
pivoting, usually pivoted QR or Choleski.
}
\usage{
Rrank(R,tol=.Machine$double.eps^.9)
}
%- maybe also `usage' for other objects documented here.
\arguments{
\item{R}{An upper triangular matrix, obtained by pivoted QR or pivoted Choleski.}
\item{tol}{the tolerance to use for judging rank.}
}
\details{ The method is based on Cline et al. (1979) as described in Golub and van Loan (1996).
}
\author{ Simon N. Wood \email{simon.wood@r-project.org}}
\references{
Cline, A.K., C.B. Moler, G.W. Stewart and J.H. Wilkinson (1979)
An estimate for the condition number of a matrix.
SIAM J. Num. Anal. 16, 368-375
Golub, G.H, and C.F. van Loan (1996)
Matrix Computations 3rd ed.
Johns Hopkins University Press, Baltimore.
}
\examples{
set.seed(0)
n <- 10;p <- 5
X <- matrix(runif(n*(p-1)),n,p)
qrx <- qr(X,LAPACK=TRUE)
Rrank(qr.R(qrx))
}
\keyword{models} \keyword{smooth} \keyword{regression}%-- one or more ..
......@@ -207,8 +207,8 @@ library(mgcv)
## Some examples are marked 'Not run' purely to keep
## checking load on CRAN down. Sample sizes are small for
## the same reason.
dat <- gamSim(1,n=20000,dist="normal",scale=20)
set.seed(3)
dat <- gamSim(1,n=15000,dist="normal",scale=20)
bs <- "cr";k <- 20
b <- bam(y ~ s(x0,bs=bs,k=k)+s(x1,bs=bs,k=k)+s(x2,bs=bs,k=k)+
s(x3,bs=bs,k=k),data=dat)
......@@ -225,8 +225,8 @@ summary(ba)}
k <- 15
dat <- gamSim(1,n=11000,dist="poisson",scale=.1)
system.time(b1 <- bam(y ~ s(x0,bs=bs,k=k)+s(x1,bs=bs,k=k)+s(x2,bs=bs,k=k)+
s(x3,bs=bs,k=k),data=dat,family=poisson()))
system.time(b1 <- bam(y ~ s(x0,bs=bs,k=k)+s(x1,bs=bs,k=k)+s(x2,bs=bs,k=k),
data=dat,family=poisson()))
b1
## repeat on a cluster (need much larger n to be worthwhile!)
......@@ -237,14 +237,14 @@ if (detectCores()>1) { ## no point otherwise
## could also use makeForkCluster, but read warnings first!
} else cl <- NULL
system.time(b2 <- bam(y ~ s(x0,bs=bs,k=k)+s(x1,bs=bs,k=k)+s(x2,bs=bs,k=k)+
s(x3,bs=bs,k=k),data=dat,family=poisson(),cluster=cl))
system.time(b2 <- bam(y ~ s(x0,bs=bs,k=k)+s(x1,bs=bs,k=k)+s(x2,bs=bs,k=k)
,data=dat,family=poisson(),cluster=cl))
## ... first call has startup overheads, repeat shows speed up...
\dontrun{
system.time(b2 <- bam(y ~ s(x0,bs=bs,k=k)+s(x1,bs=bs,k=k)+s(x2,bs=bs,k=k)+
s(x3,bs=bs,k=k),data=dat,family=poisson(),cluster=cl))
system.time(b2 <- bam(y ~ s(x0,bs=bs,k=k)+s(x1,bs=bs,k=k)+s(x2,bs=bs,k=k)
,data=dat,family=poisson(),cluster=cl))
}
fv <- predict(b2,cluster=cl) ## parallel prediction
......
......@@ -242,7 +242,7 @@ Linked smoothing parameters and adaptive smoothing are not supported.
library(mgcv)
## simple examples using gamm as alternative to gam
set.seed(0)
dat <- gamSim(1,n=400,scale=2)
dat <- gamSim(1,n=200,scale=2)
b <- gamm(y~s(x0)+s(x1)+s(x2)+s(x3),data=dat)
plot(b$gam,pages=1)
summary(b$lme) # details of underlying lme fit
......@@ -257,8 +257,8 @@ par(op)
rm(dat)
## Add a factor to the linear predictor, to be modelled as random
dat <- gamSim(6,n=400,scale=.2,dist="poisson")
b2<-gamm(y~s(x0)+s(x1)+s(x2)+s(x3),family=poisson,
dat <- gamSim(6,n=200,scale=.2,dist="poisson")
b2<-gamm(y~s(x0)+s(x1)+s(x2),family=poisson,
data=dat,random=list(fac=~1))
plot(b2$gam,pages=1)
fac <- dat$fac
......@@ -267,7 +267,7 @@ vis.gam(b2$gam)
## now an example with autocorrelated errors....
n <- 400;sig <- 2
n <- 200;sig <- 2
x <- 0:(n-1)/(n-1)
f <- 0.2*x^11*(10*(1-x))^6+10*(10*x)^3*(1-x)^10
e <- rnorm(n,0,sig)
......@@ -308,9 +308,9 @@ for (i in 1:9) rb <- c(rb,rep(rnorm(4),rep(n.g,4)))
## simulate auto-correlated errors within groups
e<-array(0,0)
for (i in 1:40) {
eg <- rnorm(n.g, 0, sig)
for (j in 2:n.g) eg[j] <- eg[j-1]*0.6+ eg[j]
e<-c(e,eg)
eg <- rnorm(n.g, 0, sig)
for (j in 2:n.g) eg[j] <- eg[j-1]*0.6+ eg[j]
e<-c(e,eg)
}
dat$y <- f + ra + rb + e
dat$fa <- fa;dat$fb <- fb
......@@ -322,7 +322,7 @@ plot(b$gam,pages=1)
summary(b$gam)
vis.gam(b$gam)
## and a "spatial" example...
library(nlme);set.seed(1);n <- 200
library(nlme);set.seed(1);n <- 100
dat <- gamSim(2,n=n,scale=0) ## standard example
attach(dat)
old.par<-par(mfrow=c(2,2))
......
......@@ -142,7 +142,7 @@ print(b3)}
## another example...
set.seed(1)
f <- dat$f
f <- f - min(f);g <- f^2
f <- f - min(f)+5;g <- f^2/10
dat$y <- rnbinom(g,size=3,mu=g)
b <- gam(y~s(x0)+s(x1)+s(x2)+s(x3),family=negbin(1:10,link="sqrt"),
data=dat)
......
......@@ -153,7 +153,8 @@ nmax <- 100
knots <- data.frame(v=rep(seq(-.5,3,by=.5),4),
w=rep(c(-.6,-.3,.3,.6),rep(8,4)))
## Simulate some fitting data, inside boundary...
n<-1000
set.seed(0)
n<-600
v <- runif(n)*5-1;w<-runif(n)*2-1
y <- fs.test(v,w,b=1)
names(fsb[[1]]) <- c("v","w")
......@@ -305,7 +306,7 @@ n <- length(x)
z <- f1(x,y) + rnorm(n)*.1
## Fit a soap film smooth to the noisy data
nmax <- 100
nmax <- 60
b <- gam(z~s(x,y,k=c(30,15),bs="so",xt=list(bnd=bnd,nmax=nmax)),knots=knots,method="REML")
plot(b) ## default plot
vis.gam(b,plot.type="contour") ## prettier version
......
\name{smooth.construct.t2.smooth.spec}
\alias{smooth.construct.t2.smooth.spec}
%- Also NEED an `\alias' for EACH other topic documented here.
\title{Tensor product smoothing constructor}
\description{A special \code{smooth.construct} method function for creating tensor product smooths from any
combination of single penalty marginal smooths, using the construction of Wood, Scheipl and Faraway (2013).
}
\usage{
\method{smooth.construct}{t2.smooth.spec}(object, data, knots)
}
\arguments{
\item{object}{a smooth specification object of class \code{t2.smooth.spec},
usually generated by a term like \code{t2(x,z)} in a \code{\link{gam}} model formula}
\item{data}{a list containing just the data (including any \code{by} variable) required by this term,
with names corresponding to \code{object$term} (and \code{object$by}). The \code{by} variable
is the last element.}
\item{knots}{a list containing any knots supplied for basis setup --- in same order and with same names as \code{data}.
Can be \code{NULL}. See details for further information.}
}
\value{ An object of class \code{"t2.smooth"}.
}
\details{Tensor product smooths are smooths of several variables which allow the degree of smoothing to be different with respect
to different variables. They are useful as smooth interaction terms, as they are invariant to linear rescaling of the covariates,
which means, for example, that they are insensitive to the measurement units of the different covariates. They are also useful
whenever isotropic smoothing is inappropriate. See \code{\link{t2}}, \code{\link{te}}, \code{\link{smooth.construct}} and
\code{\link{smooth.terms}}. The construction employed here produces tensor smooths for which the smoothing penalties are non-overlapping portions of the identity matrix. This makes their estimation by mixed modelling software rather easy.
}
\references{
Wood, S.N., F. Scheipl and J.J. Faraway (2013) Straightforward intermediate rank tensor product smoothing in mixed models.
Statistics and Computing 23: 341-360.
}
\author{ Simon N. Wood \email{simon.wood@r-project.org}}
\seealso{\code{\link{t2}}}
\examples{
## see ?t2
}
\keyword{models} \keyword{regression}%-- one or more ..
PKG_LIBS = $(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS) $(SHLIB_OPENMP_CFLAGS)
PKG_CFLAGS = $(SHLIB_OPENMP_CFLAGS)
#PKG_CFLAGS = -Wall -pedantic -g -O0 $(SHLIB_OPENMP_CFLAGS)
#PKG_CFLAGS = -Wall -pedantic $(SHLIB_OPENMP_CFLAGS)
## `#' out previous line for release