stats-Ex.Rout.save 538 KB
Newer Older
1

2
R version 3.5.2 RC (2018-12-13 r75856) -- "Eggshell Igloo"
3
Copyright (C) 2018 The R Foundation for Statistical Computing
4
Platform: x86_64-pc-linux-gnu (64-bit)
5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24

R is free software and comes with ABSOLUTELY NO WARRANTY.
You are welcome to redistribute it under certain conditions.
Type 'license()' or 'licence()' for distribution details.

  Natural language support but running in an English locale

R is a collaborative project with many contributors.
Type 'contributors()' for more information and
'citation()' on how to cite R or R packages in publications.

Type 'demo()' for some demos, 'help()' for on-line help, or
'help.start()' for an HTML browser interface to help.
Type 'q()' to quit R.

> pkgname <- "stats"
> source(file.path(R.home("share"), "R", "examples-header.R"))
> options(warn = 1)
> library('stats')
> 
25
> base::assign(".oldSearch", base::search(), pos = 'CheckExEnv')
26
> base::assign(".old_wd", base::getwd(), pos = 'CheckExEnv')
27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79
> cleanEx()
> nameEx("AIC")
> ### * AIC
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: AIC
> ### Title: Akaike's An Information Criterion
> ### Aliases: AIC BIC
> ### Keywords: models
> 
> ### ** Examples
> 
> lm1 <- lm(Fertility ~ . , data = swiss)
> AIC(lm1)
[1] 326.0716
> stopifnot(all.equal(AIC(lm1),
+                     AIC(logLik(lm1))))
> BIC(lm1)
[1] 339.0226
> 
> lm2 <- update(lm1, . ~ . -Examination)
> AIC(lm1, lm2)
    df      AIC
lm1  7 326.0716
lm2  6 325.2408
> BIC(lm1, lm2)
    df      BIC
lm1  7 339.0226
lm2  6 336.3417
> 
> 
> 
> cleanEx()
> nameEx("ARMAacf")
> ### * ARMAacf
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: ARMAacf
> ### Title: Compute Theoretical ACF for an ARMA Process
> ### Aliases: ARMAacf
> ### Keywords: ts
> 
> ### ** Examples
> 
> ARMAacf(c(1.0, -0.25), 1.0, lag.max = 10)
          0           1           2           3           4           5 
1.000000000 0.875000000 0.625000000 0.406250000 0.250000000 0.148437500 
          6           7           8           9          10 
0.085937500 0.048828125 0.027343750 0.015136719 0.008300781 
> 
> ## Example from Brockwell & Davis (1991, pp.92-4)
80 81 82 83 84 85 86 87 88 89
> ## answer: 2^(-n) * (32/3 + 8 * n) /(32/3)
> n <- 1:10
> a.n <- 2^(-n) * (32/3 + 8 * n) /(32/3)
> (A.n <- ARMAacf(c(1.0, -0.25), 1.0, lag.max = 10))
          0           1           2           3           4           5 
1.000000000 0.875000000 0.625000000 0.406250000 0.250000000 0.148437500 
          6           7           8           9          10 
0.085937500 0.048828125 0.027343750 0.015136719 0.008300781 
> stopifnot(all.equal(unname(A.n), c(1, a.n)))
> 
90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146
> ARMAacf(c(1.0, -0.25), 1.0, lag.max = 10, pacf = TRUE)
 [1]  0.8750000 -0.6000000  0.3750000 -0.2727273  0.2142857 -0.1764706
 [7]  0.1500000 -0.1304348  0.1153846 -0.1034483
> zapsmall(ARMAacf(c(1.0, -0.25), lag.max = 10, pacf = TRUE))
 [1]  0.80 -0.25  0.00  0.00  0.00  0.00  0.00  0.00  0.00  0.00
> 
> ## Cov-Matrix of length-7 sub-sample of AR(1) example:
> toeplitz(ARMAacf(0.8, lag.max = 7))
          [,1]     [,2]    [,3]   [,4]   [,5]    [,6]     [,7]      [,8]
[1,] 1.0000000 0.800000 0.64000 0.5120 0.4096 0.32768 0.262144 0.2097152
[2,] 0.8000000 1.000000 0.80000 0.6400 0.5120 0.40960 0.327680 0.2621440
[3,] 0.6400000 0.800000 1.00000 0.8000 0.6400 0.51200 0.409600 0.3276800
[4,] 0.5120000 0.640000 0.80000 1.0000 0.8000 0.64000 0.512000 0.4096000
[5,] 0.4096000 0.512000 0.64000 0.8000 1.0000 0.80000 0.640000 0.5120000
[6,] 0.3276800 0.409600 0.51200 0.6400 0.8000 1.00000 0.800000 0.6400000
[7,] 0.2621440 0.327680 0.40960 0.5120 0.6400 0.80000 1.000000 0.8000000
[8,] 0.2097152 0.262144 0.32768 0.4096 0.5120 0.64000 0.800000 1.0000000
> 
> 
> 
> cleanEx()
> nameEx("ARMAtoMA")
> ### * ARMAtoMA
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: ARMAtoMA
> ### Title: Convert ARMA Process to Infinite MA Process
> ### Aliases: ARMAtoMA
> ### Keywords: ts
> 
> ### ** Examples
> 
> ARMAtoMA(c(1.0, -0.25), 1.0, 10)
 [1] 2.00000000 1.75000000 1.25000000 0.81250000 0.50000000 0.29687500
 [7] 0.17187500 0.09765625 0.05468750 0.03027344
> ## Example from Brockwell & Davis (1991, p.92)
> ## answer (1 + 3*n)*2^(-n)
> n <- 1:10; (1 + 3*n)*2^(-n)
 [1] 2.00000000 1.75000000 1.25000000 0.81250000 0.50000000 0.29687500
 [7] 0.17187500 0.09765625 0.05468750 0.03027344
> 
> 
> 
> cleanEx()
> nameEx("Beta")
> ### * Beta
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: Beta
> ### Title: The Beta Distribution
> ### Aliases: Beta dbeta pbeta qbeta rbeta
> ### Keywords: distribution
> 
> ### ** Examples
> 
147
> x <- seq(0, 1, length = 21)
148 149 150 151 152 153
> dbeta(x, 1, 1)
 [1] 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
> pbeta(x, 1, 1)
 [1] 0.00 0.05 0.10 0.15 0.20 0.25 0.30 0.35 0.40 0.45 0.50 0.55 0.60 0.65 0.70
[16] 0.75 0.80 0.85 0.90 0.95 1.00
> 
154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187
> ## Visualization, including limit cases:
> pl.beta <- function(a,b, asp = if(isLim) 1, ylim = if(isLim) c(0,1.1)) {
+   if(isLim <- a == 0 || b == 0 || a == Inf || b == Inf) {
+     eps <- 1e-10
+     x <- c(0, eps, (1:7)/16, 1/2+c(-eps,0,eps), (9:15)/16, 1-eps, 1)
+   } else {
+     x <- seq(0, 1, length = 1025)
+   }
+   fx <- cbind(dbeta(x, a,b), pbeta(x, a,b), qbeta(x, a,b))
+   f <- fx; f[fx == Inf] <- 1e100
+   matplot(x, f, ylab="", type="l", ylim=ylim, asp=asp,
+           main = sprintf("[dpq]beta(x, a=%g, b=%g)", a,b))
+   abline(0,1,     col="gray", lty=3)
+   abline(h = 0:1, col="gray", lty=3)
+   legend("top", paste0(c("d","p","q"), "beta(x, a,b)"),
+          col=1:3, lty=1:3, bty = "n")
+   invisible(cbind(x, fx))
+ }
> pl.beta(3,1)
> 
> pl.beta(2, 4)
> pl.beta(3, 7)
> pl.beta(3, 7, asp=1)
> 
> pl.beta(0, 0)   ## point masses at  {0, 1}
> 
> pl.beta(0, 2)   ## point mass at 0 ; the same as
> pl.beta(1, Inf)
> 
> pl.beta(Inf, 2) ## point mass at 1 ; the same as
> pl.beta(3, 0)
> 
> pl.beta(Inf, Inf)# point mass at 1/2
> 
188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210
> 
> 
> cleanEx()
> nameEx("Binomial")
> ### * Binomial
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: Binomial
> ### Title: The Binomial Distribution
> ### Aliases: Binomial dbinom pbinom qbinom rbinom
> ### Keywords: distribution
> 
> ### ** Examples
> 
> require(graphics)
> # Compute P(45 < X < 55) for X Binomial(100,0.5)
> sum(dbinom(46:54, 100, 0.5))
[1] 0.6317984
> 
> ## Using "log = TRUE" for an extended range :
> n <- 2000
> k <- seq(0, n, by = 20)
211
> plot (k, dbinom(k, n, pi/10, log = TRUE), type = "l", ylab = "log density",
212
+       main = "dbinom(*, log=TRUE) is better than  log(dbinom(*))")
213
> lines(k, log(dbinom(k, n, pi/10)), col = "red", lwd = 2)
214
> ## extreme points are omitted since dbinom gives 0.
215 216 217
> mtext("dbinom(k, log=TRUE)", adj = 0)
> mtext("extended range", adj = 0, line = -1, font = 4)
> mtext("log(dbinom(k))", col = "red", adj = 1)
218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253
> 
> 
> 
> cleanEx()
> nameEx("Cauchy")
> ### * Cauchy
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: Cauchy
> ### Title: The Cauchy Distribution
> ### Aliases: Cauchy dcauchy pcauchy qcauchy rcauchy
> ### Keywords: distribution
> 
> ### ** Examples
> 
> dcauchy(-1:4)
[1] 0.15915494 0.31830989 0.15915494 0.06366198 0.03183099 0.01872411
> 
> 
> 
> cleanEx()
> nameEx("Chisquare")
> ### * Chisquare
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: Chisquare
> ### Title: The (non-central) Chi-Squared Distribution
> ### Aliases: Chisquare dchisq pchisq qchisq rchisq
> ### Keywords: distribution
> 
> ### ** Examples
> 
> require(graphics)
> 
254
> dchisq(1, df = 1:3)
255
[1] 0.2419707 0.3032653 0.2419707
256
> pchisq(1, df =  3)
257
[1] 0.198748
258
> pchisq(1, df =  3, ncp = 0:4)  # includes the above
259 260 261 262
[1] 0.19874804 0.13229855 0.08787311 0.05824691 0.03853592
> 
> x <- 1:10
> ## Chi-squared(df = 2) is a special exponential distribution
263
> all.equal(dchisq(x, df = 2), dexp(x, 1/2))
264
[1] TRUE
265
> all.equal(pchisq(x, df = 2), pexp(x, 1/2))
266 267
[1] TRUE
> 
268
> ## non-central RNG -- df = 0 with ncp > 0:  Z0 has point mass at 0!
269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286
> Z0 <- rchisq(100, df = 0, ncp = 2.)
> graphics::stem(Z0)

  The decimal point is at the |

  0 | 0000000000000000000000000000000000000013356778899
  1 | 0001333456678888899
  2 | 0011444467
  3 | 00233345888
  4 | 111246
  5 | 
  6 | 
  7 | 178
  8 | 23

> 
> 
> ## "analytical" test
287 288 289
> lam <- seq(0, 100, by = .25)
> p00 <- pchisq(0,      df = 0, ncp = lam)
> p.0 <- pchisq(1e-300, df = 0, ncp = lam)
290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310
> stopifnot(all.equal(p00, exp(-lam/2)),
+           all.equal(p.0, exp(-lam/2)))
> 
> 
> 
> cleanEx()
> nameEx("Exponential")
> ### * Exponential
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: Exponential
> ### Title: The Exponential Distribution
> ### Aliases: Exponential dexp pexp qexp rexp
> ### Keywords: distribution
> 
> ### ** Examples
> 
> dexp(1) - exp(-1) #-> 0
[1] 0
> 
311 312 313 314 315 316
> ## a fast way to generate *sorted*  U[0,1]  random numbers:
> rsunif <- function(n) { n1 <- n+1
+    cE <- cumsum(rexp(n1)); cE[seq_len(n)]/cE[n1] }
> plot(rsunif(1000), ylim=0:1, pch=".")
> abline(0,1/(1000+1), col=adjustcolor(1, 0.5))
> 
317 318 319 320 321 322 323 324 325 326 327 328 329 330 331
> 
> 
> cleanEx()
> nameEx("Fdist")
> ### * Fdist
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: FDist
> ### Title: The F Distribution
> ### Aliases: FDist df pf qf rf
> ### Keywords: distribution
> 
> ### ** Examples
> 
332 333 334 335 336 337 338 339
> ## Equivalence of pt(.,nu) with pf(.^2, 1,nu):
> x <- seq(0.001, 5, len = 100)
> nu <- 4
> stopifnot(all.equal(2*pt(x,nu) - 1, pf(x^2, 1,nu)),
+           ## upper tails:
+  	  all.equal(2*pt(x,     nu, lower=FALSE),
+ 		      pf(x^2, 1,nu, lower=FALSE)))
> 
340 341 342 343 344
> ## the density of the square of a t_m is 2*dt(x, m)/(2*x)
> # check this is the same as the density of F_{1,m}
> all.equal(df(x^2, 1, 5), dt(x, 5)/x)
[1] TRUE
> 
345
> ## Identity:  qf(2*p - 1, 1, df) == qt(p, df)^2  for  p >= 1/2
346 347
> p <- seq(1/2, .99, length = 50); df <- 10
> rel.err <- function(x, y) ifelse(x == y, 0, abs(x-y)/mean(abs(c(x,y))))
348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363
> 
> 
> 
> cleanEx()
> nameEx("GammaDist")
> ### * GammaDist
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: GammaDist
> ### Title: The Gamma Distribution
> ### Aliases: GammaDist dgamma pgamma qgamma rgamma
> ### Keywords: distribution
> 
> ### ** Examples
> 
364
> -log(dgamma(1:4, shape = 1))
365 366
[1] 1 2 3 4
> p <- (1:9)/10
367
> pgamma(qgamma(p, shape = 2), shape = 2)
368
[1] 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9
369
> 1 - 1/exp(qgamma(p, shape = 1))
370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409
[1] 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9
> 
> 
> 
> cleanEx()
> nameEx("Geometric")
> ### * Geometric
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: Geometric
> ### Title: The Geometric Distribution
> ### Aliases: Geometric dgeom pgeom qgeom rgeom
> ### Keywords: distribution
> 
> ### ** Examples
> 
> qgeom((1:9)/10, prob = .2)
[1]  0  0  1  2  3  4  5  7 10
> Ni <- rgeom(20, prob = 1/4); table(factor(Ni, 0:max(Ni)))

 0  1  2  3  4  5  6  7  8  9 10 11 
 5  3  3  1  2  2  0  1  0  1  1  1 
> 
> 
> 
> cleanEx()
> nameEx("HoltWinters")
> ### * HoltWinters
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: HoltWinters
> ### Title: Holt-Winters Filtering
> ### Aliases: HoltWinters print.HoltWinters residuals.HoltWinters
> ### Keywords: ts
> 
> ### ** Examples
> 
> ## Don't show: 
410
> od <- options(digits = 5)
411
> ## End(Don't show)
412 413 414 415 416 417 418
> require(graphics)
> 
> ## Seasonal Holt-Winters
> (m <- HoltWinters(co2))
Holt-Winters exponential smoothing with trend and additive seasonal component.

Call:
419
HoltWinters(x = co2)
420 421

Smoothing parameters:
422 423 424
 alpha: 0.51265
 beta : 0.0094977
 gamma: 0.47289
425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448

Coefficients:
         [,1]
a   364.76162
b     0.12474
s1    0.22153
s2    0.95528
s3    1.59847
s4    2.87580
s5    3.28201
s6    2.44070
s7    0.89694
s8   -1.37964
s9   -3.41124
s10  -3.25702
s11  -1.91349
s12  -0.58442
> plot(m)
> plot(fitted(m))
> 
> (m <- HoltWinters(AirPassengers, seasonal = "mult"))
Holt-Winters exponential smoothing with trend and multiplicative seasonal component.

Call:
449
HoltWinters(x = AirPassengers, seasonal = "mult")
450 451

Smoothing parameters:
452 453 454
 alpha: 0.27559
 beta : 0.032693
 gamma: 0.87073
455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483

Coefficients:
         [,1]
a   469.32322
b     3.02154
s1    0.94646
s2    0.88292
s3    0.97174
s4    1.03048
s5    1.04769
s6    1.18053
s7    1.35908
s8    1.33317
s9    1.10834
s10   0.98688
s11   0.83613
s12   0.92099
> plot(m)
> 
> ## Non-Seasonal Holt-Winters
> x <- uspop + rnorm(uspop, sd = 5)
> m <- HoltWinters(x, gamma = FALSE)
> plot(m)
> 
> ## Exponential Smoothing
> m2 <- HoltWinters(x, gamma = FALSE, beta = FALSE)
> lines(fitted(m2)[,1], col = 3)
> ## Don't show: 
> options(od)
484
> ## End(Don't show)
485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509
> 
> 
> 
> cleanEx()
> nameEx("Hypergeometric")
> ### * Hypergeometric
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: Hypergeometric
> ### Title: The Hypergeometric Distribution
> ### Aliases: Hypergeometric dhyper phyper qhyper rhyper
> ### Keywords: distribution
> 
> ### ** Examples
> 
> m <- 10; n <- 7; k <- 8
> x <- 0:(k+1)
> rbind(phyper(x, m, n, k), dhyper(x, m, n, k))
     [,1]         [,2]       [,3]     [,4]      [,5]      [,6]      [,7]
[1,]    0 0.0004113534 0.01336898 0.117030 0.4193747 0.7821884 0.9635952
[2,]    0 0.0004113534 0.01295763 0.103661 0.3023447 0.3628137 0.1814068
           [,8]       [,9] [,10]
[1,] 0.99814891 1.00000000     1
[2,] 0.03455368 0.00185109     0
510
> all(phyper(x, m, n, k) == cumsum(dhyper(x, m, n, k)))  # FALSE
511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528
[1] FALSE
> 
> 
> cleanEx()
> nameEx("IQR")
> ### * IQR
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: IQR
> ### Title: The Interquartile Range
> ### Aliases: IQR
> ### Keywords: univar robust distribution
> 
> ### ** Examples
> 
> IQR(rivers)
[1] 370
529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573
> 
> 
> 
> cleanEx()
> nameEx("KalmanLike")
> ### * KalmanLike
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: KalmanLike
> ### Title: Kalman Filtering
> ### Aliases: KalmanLike KalmanRun KalmanSmooth KalmanForecast makeARIMA
> ### Keywords: ts
> 
> ### ** Examples
> 
> ## an ARIMA fit
> fit3 <- arima(presidents, c(3, 0, 0))
> predict(fit3, 12)
$pred
         Qtr1     Qtr2     Qtr3     Qtr4
1975 29.84194 34.41014 39.30815 43.02779
1976 46.18808 48.56947 50.44866 51.86064
1977 52.94295 53.75521 54.37019 54.83150

$se
         Qtr1     Qtr2     Qtr3     Qtr4
1975  9.00655 11.25606 13.43389 14.51516
1976 15.25538 15.65611 15.90158 16.03792
1977 16.11764 16.16229 16.18785 16.20220

> ## reconstruct this
> pr <- KalmanForecast(12, fit3$model)
> pr$pred + fit3$coef[4]
 [1] 29.84194 34.41014 39.30815 43.02779 46.18808 48.56947 50.44866 51.86064
 [9] 52.94295 53.75521 54.37019 54.83150
> sqrt(pr$var * fit3$sigma2)
 [1]  9.00655 11.25606 13.43389 14.51516 15.25538 15.65611 15.90158 16.03792
 [9] 16.11764 16.16229 16.18785 16.20220
> ## and now do it year by year
> mod <- fit3$model
> for(y in 1:3) {
+   pr <- KalmanForecast(4, mod, TRUE)
+   print(list(pred = pr$pred + fit3$coef["intercept"], 
+              se = sqrt(pr$var * fit3$sigma2)))
574
+   mod <- attr(pr, "mod")
575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593
+ }
$pred
[1] 29.84194 34.41014 39.30815 43.02779

$se
[1]  9.00655 11.25606 13.43389 14.51516

$pred
[1] 46.18808 48.56947 50.44866 51.86064

$se
[1] 15.25538 15.65611 15.90158 16.03792

$pred
[1] 52.94295 53.75521 54.37019 54.83150

$se
[1] 16.11764 16.16229 16.18785 16.20220

594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609
> 
> 
> 
> cleanEx()
> nameEx("Logistic")
> ### * Logistic
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: Logistic
> ### Title: The Logistic Distribution
> ### Aliases: Logistic dlogis plogis qlogis rlogis
> ### Keywords: distribution
> 
> ### ** Examples
> 
610
> var(rlogis(4000, 0, scale = 5))  # approximately (+/- 3)
611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647
[1] 86.93007
> pi^2/3 * 5^2
[1] 82.2467
> 
> 
> 
> cleanEx()
> nameEx("Lognormal")
> ### * Lognormal
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: Lognormal
> ### Title: The Log Normal Distribution
> ### Aliases: Lognormal dlnorm plnorm qlnorm rlnorm
> ### Keywords: distribution
> 
> ### ** Examples
> 
> dlnorm(1) == dnorm(0)
[1] TRUE
> 
> 
> 
> cleanEx()
> nameEx("Multinom")
> ### * Multinom
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: Multinom
> ### Title: The Multinomial Distribution
> ### Aliases: Multinomial rmultinom dmultinom
> ### Keywords: distribution
> 
> ### ** Examples
> 
648
> rmultinom(10, size = 12, prob = c(0.1,0.2,0.8))
649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690
     [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8] [,9] [,10]
[1,]    0    1    0    3    1    0    1    2    2     1
[2,]    2    4    4    2    0    1    2    2    5     3
[3,]   10    7    8    7   11   11    9    8    5     8
> 
> pr <- c(1,3,6,10) # normalization not necessary for generation
> rmultinom(10, 20, prob = pr)
     [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8] [,9] [,10]
[1,]    3    0    0    0    1    1    1    1    1     1
[2,]    1    2    3    3    2    4    3    4    4     4
[3,]    7    6    9    7    8    3    8    6    2     7
[4,]    9   12    8   10    9   12    8    9   13     8
> 
> ## all possible outcomes of Multinom(N = 3, K = 3)
> X <- t(as.matrix(expand.grid(0:3, 0:3))); X <- X[, colSums(X) <= 3]
> X <- rbind(X, 3:3 - colSums(X)); dimnames(X) <- list(letters[1:3], NULL)
> X
  [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8] [,9] [,10]
a    0    1    2    3    0    1    2    0    1     0
b    0    0    0    0    1    1    1    2    2     3
c    3    2    1    0    2    1    0    1    0     0
> round(apply(X, 2, function(x) dmultinom(x, prob = c(1,2,5))), 3)
 [1] 0.244 0.146 0.029 0.002 0.293 0.117 0.012 0.117 0.023 0.016
> 
> 
> 
> cleanEx()
> nameEx("NLSstAsymptotic")
> ### * NLSstAsymptotic
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: NLSstAsymptotic
> ### Title: Fit the Asymptotic Regression Model
> ### Aliases: NLSstAsymptotic NLSstAsymptotic.sortedXyData
> ### Keywords: manip
> 
> ### ** Examples
> 
> Lob.329 <- Loblolly[ Loblolly$Seed == "329", ]
> print(NLSstAsymptotic(sortedXyData(expression(age),
+                                    expression(height),
691
+                                    Lob.329)), digits = 3)
692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779
    b0     b1    lrc 
 -8.25 102.38  -3.22 
> 
> 
> 
> cleanEx()
> nameEx("NLSstClosestX")
> ### * NLSstClosestX
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: NLSstClosestX
> ### Title: Inverse Interpolation
> ### Aliases: NLSstClosestX NLSstClosestX.sortedXyData
> ### Keywords: manip
> 
> ### ** Examples
> 
> DNase.2 <- DNase[ DNase$Run == "2", ]
> DN.srt <- sortedXyData(expression(log(conc)), expression(density), DNase.2)
> NLSstClosestX(DN.srt, 1.0)
[1] 0.9795406
> 
> 
> 
> cleanEx()
> nameEx("NLSstLfAsymptote")
> ### * NLSstLfAsymptote
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: NLSstLfAsymptote
> ### Title: Horizontal Asymptote on the Left Side
> ### Aliases: NLSstLfAsymptote NLSstLfAsymptote.sortedXyData
> ### Keywords: manip
> 
> ### ** Examples
> 
> DNase.2 <- DNase[ DNase$Run == "2", ]
> DN.srt <- sortedXyData( expression(log(conc)), expression(density), DNase.2 )
> NLSstLfAsymptote( DN.srt )
[1] -0.1869375
> 
> 
> 
> cleanEx()
> nameEx("NLSstRtAsymptote")
> ### * NLSstRtAsymptote
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: NLSstRtAsymptote
> ### Title: Horizontal Asymptote on the Right Side
> ### Aliases: NLSstRtAsymptote NLSstRtAsymptote.sortedXyData
> ### Keywords: manip
> 
> ### ** Examples
> 
> DNase.2 <- DNase[ DNase$Run == "2", ]
> DN.srt <- sortedXyData( expression(log(conc)), expression(density), DNase.2 )
> NLSstRtAsymptote( DN.srt )
[1] 2.157437
> 
> 
> 
> cleanEx()
> nameEx("NegBinomial")
> ### * NegBinomial
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: NegBinomial
> ### Title: The Negative Binomial Distribution
> ### Aliases: NegBinomial dnbinom pnbinom qnbinom rnbinom
> ### Keywords: distribution
> 
> ### ** Examples
> 
> require(graphics)
> x <- 0:11
> dnbinom(x, size = 1, prob = 1/2) * 2^(1 + x) # == 1
 [1] 1 1 1 1 1 1 1 1 1 1 1 1
> 126 /  dnbinom(0:8, size  = 2, prob  = 1/2) #- theoretically integer
[1]   504.0   504.0   672.0  1008.0  1612.8  2688.0  4608.0  8064.0 14336.0
> 
> 
> x <- 0:15
> size <- (1:20)/4
780 781
> persp(x, size, dnb <- outer(x, size, function(x,s) dnbinom(x, s, prob = 0.4)),
+       xlab = "x", ylab = "s", zlab = "density", theta = 150)
782 783
> title(tit <- "negative binomial density(x,s, pr = 0.4)  vs.  x & s")
> 
784 785
> image  (x, size, log10(dnb), main = paste("log [", tit, "]"))
> contour(x, size, log10(dnb), add = TRUE)
786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814
> 
> ## Alternative parametrization
> x1 <- rnbinom(500, mu = 4, size = 1)
> x2 <- rnbinom(500, mu = 4, size = 10)
> x3 <- rnbinom(500, mu = 4, size = 100)
> h1 <- hist(x1, breaks = 20, plot = FALSE)
> h2 <- hist(x2, breaks = h1$breaks, plot = FALSE)
> h3 <- hist(x3, breaks = h1$breaks, plot = FALSE)
> barplot(rbind(h1$counts, h2$counts, h3$counts),
+         beside = TRUE, col = c("red","blue","cyan"),
+         names.arg = round(h1$breaks[-length(h1$breaks)]))
> 
> 
> 
> cleanEx()
> nameEx("Normal")
> ### * Normal
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: Normal
> ### Title: The Normal Distribution
> ### Aliases: Normal dnorm pnorm qnorm rnorm
> ### Keywords: distribution
> 
> ### ** Examples
> 
> require(graphics)
> 
815
> dnorm(0) == 1/sqrt(2*pi)
816
[1] TRUE
817
> dnorm(1) == exp(-1/2)/sqrt(2*pi)
818
[1] TRUE
819
> dnorm(1) == 1/sqrt(2*pi*exp(1))
820 821 822
[1] TRUE
> 
> ## Using "log = TRUE" for an extended range :
823 824
> par(mfrow = c(2,1))
> plot(function(x) dnorm(x, log = TRUE), -60, 50,
825
+      main = "log { Normal density }")
826 827 828
> curve(log(dnorm(x)), add = TRUE, col = "red", lwd = 2)
> mtext("dnorm(x, log=TRUE)", adj = 0)
> mtext("log(dnorm(x))", col = "red", adj = 1)
829
> 
830
> plot(function(x) pnorm(x, log.p = TRUE), -50, 10,
831
+      main = "log { Normal Cumulative }")
832 833 834
> curve(log(pnorm(x)), add = TRUE, col = "red", lwd = 2)
> mtext("pnorm(x, log=TRUE)", adj = 0)
> mtext("log(pnorm(x))", col = "red", adj = 1)
835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862
> 
> ## if you want the so-called 'error function'
> erf <- function(x) 2 * pnorm(x * sqrt(2)) - 1
> ## (see Abramowitz and Stegun 29.2.29)
> ## and the so-called 'complementary error function'
> erfc <- function(x) 2 * pnorm(x * sqrt(2), lower = FALSE)
> ## and the inverses
> erfinv <- function (x) qnorm((1 + x)/2)/sqrt(2)
> erfcinv <- function (x) qnorm(x/2, lower = FALSE)/sqrt(2)
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("Poisson")
> ### * Poisson
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: Poisson
> ### Title: The Poisson Distribution
> ### Aliases: Poisson dpois ppois qpois rpois
> ### Keywords: distribution
> 
> ### ** Examples
> 
> require(graphics)
> 
863
> -log(dpois(0:7, lambda = 1) * gamma(1+ 0:7)) # == 1
864 865 866 867 868 869
[1] 1 1 1 1 1 1 1 1
> Ni <- rpois(50, lambda = 4); table(factor(Ni, 0:max(Ni)))

 0  1  2  3  4  5  6  7  8  9 10 
 1  2  7  9  8 13  5  4  0  0  1 
> 
870
> 1 - ppois(10*(15:25), lambda = 100)  # becomes 0 (cancellation)
871 872 873
 [1] 1.233094e-06 1.261664e-08 7.085799e-11 2.252643e-13 4.440892e-16
 [6] 0.000000e+00 0.000000e+00 0.000000e+00 0.000000e+00 0.000000e+00
[11] 0.000000e+00
874
>     ppois(10*(15:25), lambda = 100, lower.tail = FALSE)  # no cancellation
875 876 877 878 879 880
 [1] 1.233094e-06 1.261664e-08 7.085800e-11 2.253110e-13 4.174239e-16
 [6] 4.626179e-19 3.142097e-22 1.337219e-25 3.639328e-29 6.453883e-33
[11] 7.587807e-37
> 
> par(mfrow = c(2, 1))
> x <- seq(-0.01, 5, 0.01)
881 882 883
> plot(x, ppois(x, 1), type = "s", ylab = "F(x)", main = "Poisson(1) CDF")
> plot(x, pbinom(x, 100, 0.01), type = "s", ylab = "F(x)",
+      main = "Binomial(100, 0.01) CDF")
884
> 
885 886 887 888 889
> ## The (limit) case  lambda = 0 :
> stopifnot(identical(dpois(0,0), 1),
+ 	  identical(ppois(0,0), 1),
+ 	  identical(qpois(1,0), 0))
> 
890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("SSD")
> ### * SSD
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: SSD
> ### Title: SSD Matrix and Estimated Variance Matrix in Multivariate Models
> ### Aliases: SSD estVar
> ### Keywords: models multivariate
> 
> ### ** Examples
> 
> # Lifted from Baron+Li:
> # "Notes on the use of R for psychology experiments and questionnaires"
> # Maxwell and Delaney, p. 497
> reacttime <- matrix(c(
+ 420, 420, 480, 480, 600, 780,
+ 420, 480, 480, 360, 480, 600,
+ 480, 480, 540, 660, 780, 780,
+ 420, 540, 540, 480, 780, 900,
+ 540, 660, 540, 480, 660, 720,
+ 360, 420, 360, 360, 480, 540,
+ 480, 480, 600, 540, 720, 840,
+ 480, 600, 660, 540, 720, 900,
+ 540, 600, 540, 480, 720, 780,
+ 480, 420, 540, 540, 660, 780),
+ ncol = 6, byrow = TRUE,
921 922 923
+ dimnames = list(subj = 1:10,
+               cond = c("deg0NA", "deg4NA", "deg8NA",
+                        "deg0NP", "deg4NP", "deg8NP")))
924
> 
925
> mlmfit <- lm(reacttime ~ 1)
926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971
> SSD(mlmfit)
$SSD
        cond
cond     deg0NA deg4NA deg8NA deg0NP deg4NP deg8NP
  deg0NA  29160  30600  26640  23760  32400  25560
  deg4NA  30600  66600  32400   7200  36000  30600
  deg8NA  26640  32400  56160  41040  57600  69840
  deg0NP  23760   7200  41040  70560  72000  63360
  deg4NP  32400  36000  57600  72000 108000 100800
  deg8NP  25560  30600  69840  63360 100800 122760

$call
lm(formula = reacttime ~ 1)

$df
[1] 9

attr(,"class")
[1] "SSD"
> estVar(mlmfit)
        cond
cond     deg0NA deg4NA deg8NA deg0NP deg4NP deg8NP
  deg0NA   3240   3400   2960   2640   3600   2840
  deg4NA   3400   7400   3600    800   4000   3400
  deg8NA   2960   3600   6240   4560   6400   7760
  deg0NP   2640    800   4560   7840   8000   7040
  deg4NP   3600   4000   6400   8000  12000  11200
  deg8NP   2840   3400   7760   7040  11200  13640
> 
> 
> 
> cleanEx()
> nameEx("SSasymp")
> ### * SSasymp
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: SSasymp
> ### Title: Self-Starting Nls Asymptotic Regression Model
> ### Aliases: SSasymp
> ### Keywords: models
> 
> ### ** Examples
> 
> ## Don't show: 
> options(show.nls.convergence=FALSE)
972
> ## End(Don't show)
973
> Lob.329 <- Loblolly[ Loblolly$Seed == "329", ]
974
> SSasymp( Lob.329$age, 100, -8.5, -3.2 )   # response only
975
[1]  3.988924 11.505611 27.822517 41.130854 51.985354 60.838463
976 977 978 979
> local({
+   Asym <- 100 ; resp0 <- -8.5 ; lrc <- -3.2
+   SSasymp( Lob.329$age, Asym, resp0, lrc) # response _and_ gradient
+ })
980 981 982 983 984 985 986 987 988 989
[1]  3.988924 11.505611 27.822517 41.130854 51.985354 60.838463
attr(,"gradient")
          Asym     resp0      lrc
[1,] 0.1151053 0.8848947 11.74087
[2,] 0.1843835 0.8156165 18.03613
[3,] 0.3347697 0.6652303 29.42113
[4,] 0.4574272 0.5425728 35.99454
[5,] 0.5574687 0.4425313 39.14366
[6,] 0.6390642 0.3609358 39.90776
> getInitial(height ~ SSasymp( age, Asym, resp0, lrc), data = Lob.329)
990 991
     Asym     resp0       lrc 
94.128204 -8.250753 -3.217578 
992 993 994 995 996 997 998 999 1000 1001 1002 1003
> ## Initial values are in fact the converged values
> fm1 <- nls(height ~ SSasymp( age, Asym, resp0, lrc), data = Lob.329)
> summary(fm1)

Formula: height ~ SSasymp(age, Asym, resp0, lrc)

Parameters:
      Estimate Std. Error t value Pr(>|t|)    
Asym   94.1282     8.4030  11.202 0.001525 ** 
resp0  -8.2508     1.2261  -6.729 0.006700 ** 
lrc    -3.2176     0.1386 -23.218 0.000175 ***
---
1004
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
1005 1006 1007 1008

Residual standard error: 0.7493 on 3 degrees of freedom

> 
1009 1010 1011 1012 1013 1014 1015 1016 1017
> ## Visualize the SSasymp()  model  parametrization :
> 
>   xx <- seq(-.3, 5, len = 101)
>   ##  Asym + (R0-Asym) * exp(-exp(lrc)* x) :
>   yy <- 5 - 4 * exp(-xx / exp(3/4))
>   stopifnot( all.equal(yy, SSasymp(xx, Asym = 5, R0 = 1, lrc = -3/4)) )
>   require(graphics)
>   op <- par(mar = c(0, .2, 4.1, 0))
>   plot(xx, yy, type = "l", axes = FALSE, ylim = c(0,5.2), xlim = c(-.3, 5),
1018
+        xlab = "", ylab = "", lwd = 2,
1019 1020 1021
+        main = quote("Parameters in the SSasymp model " ~
+                     {f[phi](x) == phi[1] + (phi[2]-phi[1])*~e^{-e^{phi[3]}*~x}}))
>   mtext(quote(list(phi[1] == "Asym", phi[2] == "R0", phi[3] == "lrc")))
1022 1023 1024 1025
>   usr <- par("usr")
>   arrows(usr[1], 0, usr[2], 0, length = 0.1, angle = 25)
>   arrows(0, usr[3], 0, usr[4], length = 0.1, angle = 25)
>   text(usr[2] - 0.2, 0.1, "x", adj = c(1, 0))
1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042
>   text(     -0.1, usr[4], "y", adj = c(1, 1))
>   abline(h = 5, lty = 3)
>   arrows(c(0.35, 0.65), 1,
+          c(0  ,  1   ), 1, length = 0.08, angle = 25); text(0.5, 1, quote(1))
>   y0 <- 1 + 4*exp(-3/4) ; t.5 <- log(2) / exp(-3/4) ; AR2 <- 3 # (Asym + R0)/2
>   segments(c(1, 1), c( 1, y0),
+            c(1, 0), c(y0,  1),  lty = 2, lwd = 0.75)
>   text(1.1, 1/2+y0/2, quote((phi[1]-phi[2])*e^phi[3]), adj = c(0,.5))
>   axis(2, at = c(1,AR2,5), labels= expression(phi[2], frac(phi[1]+phi[2],2), phi[1]),
+        pos=0, las=1)
>   arrows(c(.6,t.5-.6), AR2,
+          c(0, t.5   ), AR2, length = 0.08, angle = 25)
>   text(   t.5/2,   AR2, quote(t[0.5]))
>   text(   t.5 +.4, AR2,
+        quote({f(t[0.5]) == frac(phi[1]+phi[2],2)}~{} %=>% {}~~
+                 {t[0.5] == frac(log(2), e^{phi[3]})}), adj = c(0, 0.5))
>   par(op)
1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("SSasympOff")
> ### * SSasympOff
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: SSasympOff
> ### Title: Self-Starting Nls Asymptotic Regression Model with an Offset
> ### Aliases: SSasympOff
> ### Keywords: models
> 
> ### ** Examples
> 
> CO2.Qn1 <- CO2[CO2$Plant == "Qn1", ]
> SSasympOff(CO2.Qn1$conc, 32, -4, 43)  # response only
[1] 19.65412 29.14785 31.27791 31.88435 31.99259 31.99970 32.00000
1063 1064 1065
> local({  Asym <- 32; lrc <- -4; c0 <- 43
+   SSasympOff(CO2.Qn1$conc, Asym, lrc, c0) # response and gradient
+ })
1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076
[1] 19.65412 29.14785 31.27791 31.88435 31.99259 31.99970 32.00000
attr(,"gradient")
          Asym          lrc            c0
[1,] 0.6141911 1.175838e+01 -2.261227e-01
[2,] 0.9108704 6.895531e+00 -5.223887e-02
[3,] 0.9774346 2.737698e+00 -1.322559e-02
[4,] 0.9963859 6.503026e-01 -2.118250e-03
[5,] 0.9997683 6.204920e-02 -1.357751e-04
[6,] 0.9999906 3.479529e-03 -5.505583e-06
[7,] 1.0000000 1.369435e-05 -1.430967e-08
> getInitial(uptake ~ SSasympOff(conc, Asym, lrc, c0), data = CO2.Qn1)
1077 1078
     Asym       lrc        c0 
38.139782 -4.380647 51.223238 
1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090
> ## Initial values are in fact the converged values
> fm1 <- nls(uptake ~ SSasympOff(conc, Asym, lrc, c0), data = CO2.Qn1)
> summary(fm1)

Formula: uptake ~ SSasympOff(conc, Asym, lrc, c0)

Parameters:
     Estimate Std. Error t value Pr(>|t|)    
Asym  38.1398     0.9164  41.620 1.99e-06 ***
lrc   -4.3806     0.2042 -21.457 2.79e-05 ***
c0    51.2232    11.6698   4.389   0.0118 *  
---
1091
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
1092 1093 1094 1095

Residual standard error: 1.663 on 4 degrees of freedom

> 
1096 1097 1098 1099 1100 1101 1102 1103
> ## Visualize the SSasympOff()  model  parametrization :
> 
>   xx <- seq(0.25, 8,  by=1/16)
>   yy <- 5 * (1 -  exp(-(xx - 3/4)*0.4))
>   stopifnot( all.equal(yy, SSasympOff(xx, Asym = 5, lrc = log(0.4), c0 = 3/4)) )
>   require(graphics)
>   op <- par(mar = c(0, 0, 4.0, 0))
>   plot(xx, yy, type = "l", axes = FALSE, ylim = c(-.5,6), xlim = c(-1, 8),
1104 1105
+        xlab = "", ylab = "", lwd = 2,
+        main = "Parameters in the SSasympOff model")
1106
>   mtext(quote(list(phi[1] == "Asym", phi[2] == "lrc", phi[3] == "c0")))
1107 1108 1109 1110
>   usr <- par("usr")
>   arrows(usr[1], 0, usr[2], 0, length = 0.1, angle = 25)
>   arrows(0, usr[3], 0, usr[4], length = 0.1, angle = 25)
>   text(usr[2] - 0.2, 0.1, "x", adj = c(1, 0))
1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124
>   text(     -0.1, usr[4], "y", adj = c(1, 1))
>   abline(h = 5, lty = 3)
>   arrows(-0.8, c(2.1, 2.9),
+          -0.8, c(0  , 5  ), length = 0.1, angle = 25)
>   text  (-0.8, 2.5, quote(phi[1]))
>   segments(3/4, -.2, 3/4, 1.6, lty = 2)
>   text    (3/4,    c(-.3, 1.7), quote(phi[3]))
>   arrows(c(1.1, 1.4), -.15,
+          c(3/4, 7/4), -.15, length = 0.07, angle = 25)
>   text    (3/4 + 1/2, -.15, quote(1))
>   segments(c(3/4, 7/4, 7/4), c(0, 0, 2),   # 5 * exp(log(0.4)) = 2
+            c(7/4, 7/4, 3/4), c(0, 2, 0),  lty = 2, lwd = 2)
>   text(      7/4 +.1, 2./2, quote(phi[1]*e^phi[2]), adj = c(0, .5))
>   par(op)
1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("SSasympOrig")
> ### * SSasympOrig
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: SSasympOrig
> ### Title: Self-Starting Nls Asymptotic Regression Model through the Origin
> ### Aliases: SSasympOrig
> ### Keywords: models
> 
> ### ** Examples
1141 1142
> 
> ## Visualize the SSasympOrig()  model  parametrization :
1143 1144
> 
>   xx <- seq(0, 5, len = 101)
1145 1146 1147 1148 1149 1150
>   yy <- 5 * (1- exp(-xx * log(2)))
>   stopifnot( all.equal(yy, SSasympOrig(xx, Asym = 5, lrc = log(log(2)))) )
> 
>   require(graphics)
>   op <- par(mar = c(0, 0, 3.5, 0))
>   plot(xx, yy, type = "l", axes = FALSE, ylim = c(0,5), xlim = c(-1/4, 5),
1151
+        xlab = "", ylab = "", lwd = 2,
1152 1153
+        main = quote("Parameters in the SSasympOrig model"~~ f[phi](x)))
>   mtext(quote(list(phi[1] == "Asym", phi[2] == "lrc")))
1154 1155 1156 1157
>   usr <- par("usr")
>   arrows(usr[1], 0, usr[2], 0, length = 0.1, angle = 25)
>   arrows(0, usr[3], 0, usr[4], length = 0.1, angle = 25)
>   text(usr[2] - 0.2, 0.1, "x", adj = c(1, 0))
1158 1159 1160 1161 1162 1163 1164 1165 1166 1167
>   text(   -0.1,   usr[4], "y", adj = c(1, 1))
>   abline(h = 5, lty = 3)
>   axis(2, at = 5*c(1/2,1), labels= expression(frac(phi[1],2), phi[1]), pos=0, las=1)
>   arrows(c(.3,.7), 5/2,
+          c(0, 1 ), 5/2, length = 0.08, angle = 25)
>   text(   0.5,     5/2, quote(t[0.5]))
>   text(   1 +.4,   5/2,
+        quote({f(t[0.5]) == frac(phi[1],2)}~{} %=>% {}~~{t[0.5] == frac(log(2), e^{phi[2]})}),
+        adj = c(0, 0.5))
>   par(op)
1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("SSbiexp")
> ### * SSbiexp
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: SSbiexp
> ### Title: Self-Starting Nls Biexponential model
> ### Aliases: SSbiexp
> ### Keywords: models
> 
> ### ** Examples
> 
> Indo.1 <- Indometh[Indometh$Subject == 1, ]
> SSbiexp( Indo.1$time, 3, 1, 0.6, -1.3 )  # response only
 [1] 2.08098572 1.29421044 0.87967145 0.65483364 0.52711347 0.36094621
 [7] 0.26575722 0.20176113 0.15359129 0.11694936 0.06780767
> A1 <- 3; lrc1 <- 1; A2 <- 0.6; lrc2 <- -1.3
> SSbiexp( Indo.1$time, A1, lrc1, A2, lrc2 ) # response and gradient
 [1] 2.08098572 1.29421044 0.87967145 0.65483364 0.52711347 0.36094621
 [7] 0.26575722 0.20176113 0.15359129 0.11694936 0.06780767
attr(,"gradient")
                A1          lrc1        A2        lrc2
 [1,] 5.068347e-01 -1.033290e+00 0.9341363 -0.03818728
 [2,] 2.568814e-01 -1.047414e+00 0.8726106 -0.07134424
 [3,] 1.301964e-01 -7.962985e-01 0.8151372 -0.09996786
 [4,] 6.598804e-02 -5.381222e-01 0.7614492 -0.12451147
 [5,] 3.344502e-02 -3.409237e-01 0.7112973 -0.14538835
 [6,] 4.354421e-03 -7.101926e-02 0.5798049 -0.18961833
 [7,] 2.873397e-04 -7.029632e-03 0.4414920 -0.21657709
 [8,] 1.896098e-05 -6.184955e-04 0.3361737 -0.21988328
 [9,] 1.251198e-06 -5.101663e-05 0.2559792 -0.20928744
[10,] 8.256409e-08 -4.039784e-06 0.1949152 -0.19123411
[11,] 3.595188e-10 -2.345456e-08 0.1130128 -0.14783797
> print(getInitial(conc ~ SSbiexp(time, A1, lrc1, A2, lrc2), data = Indo.1),
+       digits = 5)
      A1     lrc1       A2     lrc2 
 2.02928  0.57939  0.19155 -1.78778 
> ## Initial values are in fact the converged values
> fm1 <- nls(conc ~ SSbiexp(time, A1, lrc1, A2, lrc2), data = Indo.1)
> summary(fm1)

Formula: conc ~ SSbiexp(time, A1, lrc1, A2, lrc2)

Parameters:
     Estimate Std. Error t value Pr(>|t|)    
A1     2.0293     0.1099  18.464 3.39e-07 ***
lrc1   0.5794     0.1247   4.648  0.00235 ** 
A2     0.1915     0.1106   1.731  0.12698    
lrc2  -1.7878     0.7871  -2.271  0.05737 .  
---
1223
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
1224 1225 1226

Residual standard error: 0.04103 on 7 degrees of freedom

1227 1228
> 
> ## Show the model components visually
1229 1230 1231 1232 1233
>   require(graphics)
> 
>   xx <- seq(0, 5, len = 101)
>   y1 <- 3.5 * exp(-4*xx)
>   y2 <- 1.5 * exp(-xx)
1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245
>   plot(xx, y1 + y2, type = "l", lwd=2, ylim = c(-0.2,6), xlim = c(0, 5),
+        main = "Components of the SSbiexp model")
>   lines(xx, y1, lty = 2, col="tomato"); abline(v=0, h=0, col="gray40")
>   lines(xx, y2, lty = 3, col="blue2" )
>   legend("topright", c("y1+y2", "y1 = 3.5 * exp(-4*x)", "y2 = 1.5 * exp(-x)"),
+          lty=1:3, col=c("black","tomato","blue2"), bty="n")
>   axis(2, pos=0, at = c(3.5, 1.5), labels = c("A1","A2"), las=2)
> 
> ## and how you could have got their sum via SSbiexp():
>   ySS <- SSbiexp(xx, 3.5, log(4), 1.5, log(1))
>   ##                      ---          ---
>   stopifnot(all.equal(y1+y2, ySS, tolerance = 1e-15))
1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262
> 
> 
> 
> cleanEx()
> nameEx("SSfol")
> ### * SSfol
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: SSfol
> ### Title: Self-Starting Nls First-order Compartment Model
> ### Aliases: SSfol
> ### Keywords: models
> 
> ### ** Examples
> 
> Theoph.1 <- Theoph[ Theoph$Subject == 1, ]
1263
> with(Theoph.1, SSfol(Dose, Time, -2.5, 0.5, -3)) # response only
1264 1265
 [1] 0.000000 2.214486 3.930988 5.261945 5.659813 5.084852 4.587699 3.916808
 [9] 3.318395 2.579204 0.943593
1266 1267 1268
> with(Theoph.1, local({  lKe <- -2.5; lKa <- 0.5; lCl <- -3
+   SSfol(Dose, Time, lKe, lKa, lCl) # response _and_ gradient
+ }))
1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284
 [1] 0.000000 2.214486 3.930988 5.261945 5.659813 5.084852 4.587699 3.916808
 [9] 3.318395 2.579204 0.943593
attr(,"gradient")
            lKe         lKa       lCl
 [1,]  0.000000  0.00000000  0.000000
 [2,]  2.190284  1.78781716 -2.214486
 [3,]  3.825518  2.35519507 -3.930988
 [4,]  4.952713  1.75648252 -5.261945
 [5,]  4.976520  0.53458070 -5.659813
 [6,]  3.752822 -0.18560297 -5.084852
 [7,]  2.906859 -0.22729852 -4.587699
 [8,]  1.861771 -0.20447579 -3.916808
 [9,]  1.027129 -0.17383515 -3.318395
[10,]  0.148370 -0.13513891 -2.579204
[11,] -0.894541 -0.04944021 -0.943593
> getInitial(conc ~ SSfol(Dose, Time, lKe, lKa, lCl), data = Theoph.1)
1285 1286
      lKe       lKa       lCl 
-2.994845  0.609169 -3.971003 
1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298
> ## Initial values are in fact the converged values
> fm1 <- nls(conc ~ SSfol(Dose, Time, lKe, lKa, lCl), data = Theoph.1)
> summary(fm1)

Formula: conc ~ SSfol(Dose, Time, lKe, lKa, lCl)

Parameters:
    Estimate Std. Error t value Pr(>|t|)    
lKe  -2.9196     0.1709 -17.085 1.40e-07 ***
lKa   0.5752     0.1728   3.328   0.0104 *  
lCl  -3.9159     0.1273 -30.768 1.35e-09 ***
---
1299
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322

Residual standard error: 0.732 on 8 degrees of freedom

> 
> 
> 
> cleanEx()
> nameEx("SSfpl")
> ### * SSfpl
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: SSfpl
> ### Title: Self-Starting Nls Four-Parameter Logistic Model
> ### Aliases: SSfpl
> ### Keywords: models
> 
> ### ** Examples
> 
> Chick.1 <- ChickWeight[ChickWeight$Chick == 1, ]
> SSfpl(Chick.1$Time, 13, 368, 14, 6)  # response only
 [1]  44.38189  55.31704  69.39853  87.05603 108.47420 133.43149 161.18758
 [8] 190.50000 219.81242 247.56851 272.52580 283.70240
1323 1324 1325 1326
> local({
+   A <- 13; B <- 368; xmid <- 14; scal <- 6
+   SSfpl(Chick.1$Time, A, B, xmid, scal) # response _and_ gradient
+ })
1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359
 [1]  44.38189  55.31704  69.39853  87.05603 108.47420 133.43149 161.18758
 [8] 190.50000 219.81242 247.56851 272.52580 283.70240
attr(,"gradient")
              A          B       xmid       scal
 [1,] 0.9116003 0.08839968  -4.767956  11.125231
 [2,] 0.8807971 0.11920292  -6.212120  12.424241
 [3,] 0.8411309 0.15886910  -7.906425  13.177374
 [4,] 0.7913915 0.20860853  -9.767885  13.023846
 [5,] 0.7310586 0.26894142 -11.632873  11.632873
 [6,] 0.6607564 0.33924363 -13.262646   8.841764
 [7,] 0.5825702 0.41742979 -14.388278   4.796093
 [8,] 0.5000000 0.50000000 -14.791667   0.000000
 [9,] 0.4174298 0.58257021 -14.388278  -4.796093
[10,] 0.3392436 0.66075637 -13.262646  -8.841764
[11,] 0.2689414 0.73105858 -11.632873 -11.632873
[12,] 0.2374580 0.76254197 -10.713410 -12.498978
> print(getInitial(weight ~ SSfpl(Time, A, B, xmid, scal), data = Chick.1),
+       digits = 5)
       A        B     xmid     scal 
 27.4532 348.9712  19.3905   6.6726 
> ## Initial values are in fact the converged values
> fm1 <- nls(weight ~ SSfpl(Time, A, B, xmid, scal), data = Chick.1)
> summary(fm1)

Formula: weight ~ SSfpl(Time, A, B, xmid, scal)

Parameters:
     Estimate Std. Error t value Pr(>|t|)    
A      27.453      6.601   4.159 0.003169 ** 
B     348.971     57.899   6.027 0.000314 ***
xmid   19.391      2.194   8.836 2.12e-05 ***
scal    6.673      1.002   6.662 0.000159 ***
---
1360
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
1361 1362 1363 1364

Residual standard error: 2.351 on 8 degrees of freedom

> 
1365
> ## Visualizing the  SSfpl()  parametrization
1366
>   xx <- seq(-0.5, 5, len = 101)
1367 1368 1369 1370
>   yy <- 1 + 4 / (1 + exp((2-xx))) # == SSfpl(xx, *) :
>   stopifnot( all.equal(yy, SSfpl(xx, A = 1, B = 5, xmid = 2, scal = 1)) )
>   require(graphics)
>   op <- par(mar = c(0, 0, 3.5, 0))
1371 1372 1373
>   plot(xx, yy, type = "l", axes = FALSE, ylim = c(0,6), xlim = c(-1, 5),
+        xlab = "", ylab = "", lwd = 2,
+        main = "Parameters in the SSfpl model")
1374
>   mtext(quote(list(phi[1] == "A", phi[2] == "B", phi[3] == "xmid", phi[4] == "scal")))
1375 1376 1377 1378
>   usr <- par("usr")
>   arrows(usr[1], 0, usr[2], 0, length = 0.1, angle = 25)
>   arrows(0, usr[3], 0, usr[4], length = 0.1, angle = 25)
>   text(usr[2] - 0.2, 0.1, "x", adj = c(1, 0))
1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393
>   text(     -0.1, usr[4], "y", adj = c(1, 1))
>   abline(h = c(1, 5), lty = 3)
>   arrows(-0.8, c(2.1, 2.9),
+          -0.8, c(0,   5  ), length = 0.1, angle = 25)
>   text  (-0.8, 2.5, quote(phi[1]))
>   arrows(-0.3, c(1/4, 3/4),
+          -0.3, c(0,   1  ), length = 0.07, angle = 25)
>   text  (-0.3, 0.5, quote(phi[2]))
>   text(2, -.1, quote(phi[3]))
>   segments(c(2,3,3), c(0,3,4), # SSfpl(x = xmid = 2) = 3
+            c(2,3,2), c(3,4,3),    lty = 2, lwd = 0.75)
>   arrows(c(2.3, 2.7), 3,
+          c(2.0, 3  ), 3, length = 0.08, angle = 25)
>   text(      2.5,     3, quote(phi[4])); text(3.1, 3.5, "1")
>   par(op)
1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("SSgompertz")
> ### * SSgompertz
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: SSgompertz
> ### Title: Self-Starting Nls Gompertz Growth Model
> ### Aliases: SSgompertz
> ### Keywords: models
> 
> ### ** Examples
> 
> DNase.1 <- subset(DNase, Run == 1)
1412
> SSgompertz(log(DNase.1$conc), 4.5, 2.3, 0.7)  # response only
1413 1414 1415
 [1] 0.00525729 0.00525729 0.07323255 0.07323255 0.18049064 0.18049064
 [7] 0.36508763 0.36508763 0.63288772 0.63288772 0.97257180 0.97257180
[13] 1.36033340 1.36033340 1.76786902 1.76786902
1416 1417 1418
> local({  Asym <- 4.5; b2 <- 2.3; b3 <- 0.7
+   SSgompertz(log(DNase.1$conc), Asym, b2, b3) # response _and_ gradient
+ })
1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456
 [1] 0.00525729 0.00525729 0.07323255 0.07323255 0.18049064 0.18049064
 [7] 0.36508763 0.36508763 0.63288772 0.63288772 0.97257180 0.97257180
[13] 1.36033340 1.36033340 1.76786902 1.76786902
attr(,"gradient")
             Asym          b2         b3
 [1,] 0.001168287 -0.01543407  0.1531221
 [2,] 0.001168287 -0.01543407  0.1531221
 [3,] 0.016273900 -0.13112424  0.7036230
 [4,] 0.016273900 -0.13112424  0.7036230
 [5,] 0.040109031 -0.25238507  0.7795153
 [6,] 0.040109031 -0.25238507  0.7795153
 [7,] 0.081130585 -0.39869082  0.3233828
 [8,] 0.081130585 -0.39869082  0.3233828
 [9,] 0.140641716 -0.53975407 -0.7914802
[10,] 0.140641716 -0.53975407 -0.7914802
[11,] 0.216127067 -0.64777036 -2.4251586
[12,] 0.216127067 -0.64777036 -2.4251586
[13,] 0.302296311 -0.70757894 -4.2605728
[14,] 0.302296311 -0.70757894 -4.2605728
[15,] 0.392859783 -0.71814108 -5.9597255
[16,] 0.392859783 -0.71814108 -5.9597255
> print(getInitial(density ~ SSgompertz(log(conc), Asym, b2, b3),
+                  data = DNase.1), digits = 5)
   Asym      b2      b3 
4.60333 2.27134 0.71647 
> ## Initial values are in fact the converged values
> fm1 <- nls(density ~ SSgompertz(log(conc), Asym, b2, b3),
+            data = DNase.1)
> summary(fm1)

Formula: density ~ SSgompertz(log(conc), Asym, b2, b3)

Parameters:
     Estimate Std. Error t value Pr(>|t|)    
Asym  4.60333    0.65321   7.047 8.71e-06 ***
b2    2.27134    0.14373  15.803 7.24e-10 ***
b3    0.71647    0.02206  32.475 7.85e-14 ***
---
1457
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
1458 1459 1460

Residual standard error: 0.02684 on 13 degrees of freedom

1461 1462 1463 1464 1465 1466
> plot(density ~ log(conc), DNase.1, # xlim = c(0, 21),
+      main = "SSgompertz() fit to DNase.1")
> ux <- par("usr")[1:2]; x <- seq(ux[1], ux[2], length.out=250)
> lines(x, do.call(SSgompertz, c(list(x=x), coef(fm1))), col = "red", lwd=2)
> As <- coef(fm1)[["Asym"]]; abline(v = 0, h = 0, lty = 3)
> axis(2, at= exp(-coef(fm1)[["b2"]]), quote(e^{-b[2]}), las=1, pos=0)
1467 1468 1469
> 
> 
> 
1470
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483
> cleanEx()
> nameEx("SSlogis")
> ### * SSlogis
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: SSlogis
> ### Title: Self-Starting Nls Logistic Model
> ### Aliases: SSlogis
> ### Keywords: models
> 
> ### ** Examples
> 
1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495
> dwlg1 <- data.frame(Prop = c(rep(0,5), 2, 5, rep(9, 9)), end = 1:16)
> iPar <- getInitial(Prop ~ SSlogis(end, Asym, xmid, scal), data = dwlg1)
> ## failed in R <= 3.4.2 (because of the '0's in 'Prop')
> stopifnot(all.equal(tol = 1e-6,
+    iPar, c(Asym = 9.0678, xmid = 6.79331, scal = 0.499934)))
> 
> ## Visualize the SSlogis()  model  parametrization :
>   xx <- seq(-0.75, 5, by=1/32)
>   yy <- 5 / (1 + exp((2-xx)/0.6)) # == SSlogis(xx, *):
>   stopifnot( all.equal(yy, SSlogis(xx, Asym = 5, xmid = 2, scal = 0.6)) )
>   require(graphics)
>   op <- par(mar = c(0.5, 0, 3.5, 0))
1496 1497 1498
>   plot(xx, yy, type = "l", axes = FALSE, ylim = c(0,6), xlim = c(-1, 5),
+        xlab = "", ylab = "", lwd = 2,
+        main = "Parameters in the SSlogis model")
1499
>   mtext(quote(list(phi[1] == "Asym", phi[2] == "xmid", phi[3] == "scal")))
1500 1501 1502 1503
>   usr <- par("usr")
>   arrows(usr[1], 0, usr[2], 0, length = 0.1, angle = 25)
>   arrows(0, usr[3], 0, usr[4], length = 0.1, angle = 25)
>   text(usr[2] - 0.2, 0.1, "x", adj = c(1, 0))
1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516
>   text(     -0.1, usr[4], "y", adj = c(1, 1))
>   abline(h = 5, lty = 3)
>   arrows(-0.8, c(2.1, 2.9),
+          -0.8, c(0,   5  ), length = 0.1, angle = 25)
>   text  (-0.8, 2.5, quote(phi[1]))
>   segments(c(2,2.6,2.6), c(0,  2.5,3.5),   # NB.  SSlogis(x = xmid = 2) = 2.5
+            c(2,2.6,2  ), c(2.5,3.5,2.5), lty = 2, lwd = 0.75)
>   text(2, -.1, quote(phi[2]))
>   arrows(c(2.2, 2.4), 2.5,
+          c(2.0, 2.6), 2.5, length = 0.08, angle = 25)
>   text(      2.3,     2.5, quote(phi[3])); text(2.7, 3, "1")
>   par(op)
> 
1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("SSmicmen")
> ### * SSmicmen
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: SSmicmen
> ### Title: Self-Starting Nls Michaelis-Menten Model
> ### Aliases: SSmicmen
> ### Keywords: models
> 
> ### ** Examples
> 
> PurTrt <- Puromycin[ Puromycin$state == "treated", ]
> SSmicmen(PurTrt$conc, 200, 0.05)  # response only
 [1]  57.14286  57.14286 109.09091 109.09091 137.50000 137.50000 162.96296
 [8] 162.96296 183.60656 183.60656 191.30435 191.30435
1537 1538 1539
> local({  Vm <- 200; K <- 0.05
+   SSmicmen(PurTrt$conc, Vm, K)    # response _and_ gradient
+ })
1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555
 [1]  57.14286  57.14286 109.09091 109.09091 137.50000 137.50000 162.96296
 [8] 162.96296 183.60656 183.60656 191.30435 191.30435
attr(,"gradient")
             Vm         K
 [1,] 0.2857143 -816.3265
 [2,] 0.2857143 -816.3265
 [3,] 0.5454545 -991.7355
 [4,] 0.5454545 -991.7355
 [5,] 0.6875000 -859.3750
 [6,] 0.6875000 -859.3750
 [7,] 0.8148148 -603.5665
 [8,] 0.8148148 -603.5665
 [9,] 0.9180328 -300.9944
[10,] 0.9180328 -300.9944
[11,] 0.9565217 -166.3516
[12,] 0.9565217 -166.3516
1556
> print(getInitial(rate ~ SSmicmen(conc, Vm, K), data = PurTrt), digits = 3)
1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569
      Vm        K 
212.6837   0.0641 
> ## Initial values are in fact the converged values
> fm1 <- nls(rate ~ SSmicmen(conc, Vm, K), data = PurTrt)
> summary(fm1)

Formula: rate ~ SSmicmen(conc, Vm, K)

Parameters:
    Estimate Std. Error t value Pr(>|t|)    
Vm 2.127e+02  6.947e+00  30.615 3.24e-11 ***
K  6.412e-02  8.281e-03   7.743 1.57e-05 ***
---
1570
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
1571 1572 1573 1574 1575 1576

Residual standard error: 10.93 on 10 degrees of freedom

> ## Alternative call using the subset argument
> fm2 <- nls(rate ~ SSmicmen(conc, Vm, K), data = Puromycin,
+            subset = state == "treated")
1577
> summary(fm2) # The same indeed:
1578 1579 1580 1581 1582 1583 1584 1585

Formula: rate ~ SSmicmen(conc, Vm, K)

Parameters:
    Estimate Std. Error t value Pr(>|t|)    
Vm 2.127e+02  6.947e+00  30.615 3.24e-11 ***
K  6.412e-02  8.281e-03   7.743 1.57e-05 ***
---
1586
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
1587 1588 1589

Residual standard error: 10.93 on 10 degrees of freedom

1590 1591 1592
> stopifnot(all.equal(coef(summary(fm1)), coef(summary(fm2))))
> 
> ## Visualize the SSmicmen()  Michaelis-Menton model parametrization :
1593 1594 1595
> 
>   xx <- seq(0, 5, len = 101)
>   yy <- 5 * xx/(1+xx)
1596 1597 1598 1599 1600 1601
>   stopifnot(all.equal(yy, SSmicmen(xx, Vm = 5, K = 1)))
>   require(graphics)
>   op <- par(mar = c(0, 0, 3.5, 0))
>   plot(xx, yy, type = "l", lwd = 2, ylim = c(-1/4,6), xlim = c(-1, 5),
+        ann = FALSE, axes = FALSE, main = "Parameters in the SSmicmen model")
>   mtext(quote(list(phi[1] == "Vm", phi[2] == "K")))
1602 1603 1604 1605
>   usr <- par("usr")
>   arrows(usr[1], 0, usr[2], 0, length = 0.1, angle = 25)
>   arrows(0, usr[3], 0, usr[4], length = 0.1, angle = 25)
>   text(usr[2] - 0.2, 0.1, "x", adj = c(1, 0))
1606 1607 1608 1609 1610
>   text(     -0.1, usr[4], "y", adj = c(1, 1))
>   abline(h = 5, lty = 3)
>   arrows(-0.8, c(2.1, 2.9),
+          -0.8, c(0,   5  ),  length = 0.1, angle = 25)
>   text(  -0.8,     2.5, quote(phi[1]))
1611
>   segments(1, 0, 1, 2.7, lty = 2, lwd = 0.75)
1612 1613
>   text(1, 2.7, quote(phi[2]))
>   par(op)
1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("SSweibull")
> ### * SSweibull
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: SSweibull
> ### Title: Self-Starting Nls Weibull Growth Curve Model
> ### Aliases: SSweibull
> ### Keywords: models
> 
> ### ** Examples
> 
> Chick.6 <- subset(ChickWeight, (Chick == 6) & (Time > 0))
> SSweibull(Chick.6$Time, 160, 115, -5.5, 2.5)   # response only
 [1]  47.62811  59.09743  79.79756 105.12008 128.41818 145.02585 154.25783
 [8] 158.24919 159.58222 159.92314 159.97023
1635 1636 1637
> local({ Asym <- 160; Drop <- 115; lrc <- -5.5; pwr <- 2.5
+   SSweibull(Chick.6$Time, Asym, Drop, lrc, pwr) # response _and_ gradient
+ })
1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668
 [1]  47.62811  59.09743  79.79756 105.12008 128.41818 145.02585 154.25783
 [8] 158.24919 159.58222 159.92314 159.97023
attr(,"gradient")
      Asym          Drop        lrc        pwr
 [1,]    1 -0.9771469094  2.5978438  1.8006881
 [2,]    1 -0.8774136912 13.1957043 18.2931305
 [3,]    1 -0.6974125358 28.9032091 51.7875987
 [4,]    1 -0.4772166721 40.5993205 84.4239136
 [5,]    1 -0.2746244909 40.8147795 93.9795029
 [6,]    1 -0.1302099955 30.5264027 75.8552610
 [7,]    1 -0.0499319343 17.2098335 45.4177374
 [8,]    1 -0.0152244293  7.3268815 20.3144290
 [9,]    1 -0.0036328431  2.3469622  6.7835933
[10,]    1 -0.0006683898  0.5619310  1.6833949
[11,]    1 -0.0002589123  0.2459116  0.7486834
> getInitial(weight ~ SSweibull(Time, Asym, Drop, lrc, pwr), data = Chick.6)
      Asym       Drop        lrc        pwr 
158.501204 110.997081  -5.993421   2.646141 
> ## Initial values are in fact the converged values
> fm1 <- nls(weight ~ SSweibull(Time, Asym, Drop, lrc, pwr), data = Chick.6)
> summary(fm1)

Formula: weight ~ SSweibull(Time, Asym, Drop, lrc, pwr)

Parameters:
     Estimate Std. Error t value Pr(>|t|)    
Asym 158.5012     1.1769  134.67 3.28e-13 ***
Drop 110.9971     2.6330   42.16 1.10e-09 ***
lrc   -5.9934     0.3733  -16.05 8.83e-07 ***
pwr    2.6461     0.1613   16.41 7.62e-07 ***
---
1669
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
1670 1671 1672

Residual standard error: 2.061 on 7 degrees of freedom

1673 1674 1675 1676 1677
> ## Data and Fit:
> plot(weight ~ Time, Chick.6, xlim = c(0, 21), main = "SSweibull() fit to Chick.6")
> ux <- par("usr")[1:2]; x <- seq(ux[1], ux[2], length.out=250)
> lines(x, do.call(SSweibull, c(list(x=x), coef(fm1))), col = "red", lwd=2)
> As <- coef(fm1)[["Asym"]]; abline(v = 0, h = c(As, As - coef(fm1)[["Drop"]]), lty = 3)
1678 1679 1680
> 
> 
> 
1681
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696
> cleanEx()
> nameEx("SignRank")
> ### * SignRank
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: SignRank
> ### Title: Distribution of the Wilcoxon Signed Rank Statistic
> ### Aliases: SignRank dsignrank psignrank qsignrank rsignrank
> ### Keywords: distribution
> 
> ### ** Examples
> 
> require(graphics)
> 
1697
> par(mfrow = c(2,2))
1698
> for(n in c(4:5,10,40)) {
1699 1700 1701
+   x <- seq(0, n*(n+1)/2, length = 501)
+   plot(x, dsignrank(x, n = n), type = "l",
+        main = paste0("dsignrank(x, n = ", n, ")"))
1702 1703 1704 1705
+ }
> ## Don't show: 
> p <- c(1, 1, 1, 2, 2:6, 8, 10, 11, 13, 15, 17, 20, 22, 24,
+        27, 29, 31, 33, 35, 36, 38, 39, 39, 40)
1706 1707
> stopifnot(round(dsignrank(0:56, n = 10)* 2^10) == c(p, rev(p), 0),
+           qsignrank((1:16)/ 16, n = 4) == c(0:2, rep(3:7, each = 2), 8:10))
1708
> ## End(Don't show)
1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("StructTS")
> ### * StructTS
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: StructTS
> ### Title: Fit Structural Time Series
> ### Aliases: StructTS print.StructTS predict.StructTS
> ### Keywords: ts
> 
> ### ** Examples
> 
> ## see also JohnsonJohnson, Nile and AirPassengers
> require(graphics)
> 
1729
> trees <- window(treering, start = 0)
1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749
> (fit <- StructTS(trees, type = "level"))

Call:
StructTS(x = trees, type = "level")

Variances:
  level  epsilon  
0.00037  0.07199  
> plot(trees)
> lines(fitted(fit), col = "green")
> tsdiag(fit)
> 
> (fit <- StructTS(log10(UKgas), type = "BSM"))

Call:
StructTS(x = log10(UKgas), type = "BSM")

Variances:
    level      slope       seas    epsilon  
0.000e+00  1.733e-05  7.137e-04  3.678e-04  
1750
> par(mfrow = c(4, 1)) # to give appropriate aspect ratio for next plot.
1751 1752 1753 1754 1755
> plot(log10(UKgas))
> plot(cbind(fitted(fit), resids=resid(fit)), main = "UK gas consumption")
> 
> ## keep some parameters fixed; trace optimizer:
> StructTS(log10(UKgas), type = "BSM", fixed = c(0.1,0.001,NA,NA),
1756
+          optim.control = list(trace = TRUE))
1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791
iter   10 value -0.936176
final  value -0.936176 
converged

Call:
StructTS(x = log10(UKgas), type = "BSM", fixed = c(0.1, 0.001, NA, NA), optim.control = list(trace = TRUE))

Variances:
    level      slope       seas    epsilon  
0.1000000  0.0010000  0.0003012  0.0000000  
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("TDist")
> ### * TDist
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: TDist
> ### Title: The Student t Distribution
> ### Aliases: TDist dt pt qt rt
> ### Keywords: distribution
> 
> ### ** Examples
> 
> require(graphics)
> 
> 1 - pt(1:5, df = 1)
[1] 0.25000000 0.14758362 0.10241638 0.07797913 0.06283296
> qt(.975, df = c(1:10,20,50,100,1000))
 [1] 12.706205  4.302653  3.182446  2.776445  2.570582  2.446912  2.364624
 [8]  2.306004  2.262157  2.228139  2.085963  2.008559  1.983972  1.962339
> 
1792 1793 1794
> tt <- seq(0, 10, len = 21)
> ncp <- seq(0, 6, len = 31)
> ptn <- outer(tt, ncp, function(t, d) pt(t, df = 3, ncp = d))
1795
> t.tit <- "Non-central t - Probabilities"
1796 1797
> image(tt, ncp, ptn, zlim = c(0,1), main = t.tit)
> persp(tt, ncp, ptn, zlim = 0:1, r = 2, phi = 20, theta = 200, main = t.tit,
1798 1799 1800 1801
+       xlab = "t", ylab = "non-centrality parameter",
+       zlab = "Pr(T <= t)")
> 
> plot(function(x) dt(x, df = 3, ncp = 2), -3, 11, ylim = c(0, 0.32),
1802
+      main = "Non-central t - Density", yaxs = "i")
1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819
> 
> 
> 
> cleanEx()
> nameEx("Tukey")
> ### * Tukey
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: Tukey
> ### Title: The Studentized Range Distribution
> ### Aliases: Tukey ptukey qtukey
> ### Keywords: distribution
> 
> ### ** Examples
> 
> if(interactive())
1820 1821
+   curve(ptukey(x, nm = 6, df = 5), from = -1, to = 8, n = 101)
> (ptt <- ptukey(0:10, 2, df =  5))
1822 1823
 [1] 0.0000000 0.4889159 0.7835628 0.9126407 0.9632574 0.9833586 0.9918510
 [8] 0.9957141 0.9976011 0.9985838 0.9991249
1824
> (qtt <- qtukey(.95, 2, df =  2:11))
1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838
 [1] 6.079637 4.500659 3.926503 3.635351 3.460456 3.344084 3.261182 3.199173
 [9] 3.151064 3.112663
> ## The precision may be not much more than about 8 digits:
> 
> 
> 
> cleanEx()
> nameEx("TukeyHSD")
> ### * TukeyHSD
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: TukeyHSD
> ### Title: Compute Tukey Honest Significant Differences
1839
> ### Aliases: TukeyHSD
1840 1841 1842 1843 1844 1845 1846
> ### Keywords: models design
> 
> ### ** Examples
> 
> require(graphics)
> 
> summary(fm1 <- aov(breaks ~ wool + tension, data = warpbreaks))
1847 1848 1849 1850
            Df Sum Sq Mean Sq F value  Pr(>F)   
wool         1    451   450.7   3.339 0.07361 . 
tension      2   2034  1017.1   7.537 0.00138 **
Residuals   50   6748   135.0                   
1851
---
1852
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892
> TukeyHSD(fm1, "tension", ordered = TRUE)
  Tukey multiple comparisons of means
    95% family-wise confidence level
    factor levels have been ordered

Fit: aov(formula = breaks ~ wool + tension, data = warpbreaks)

$tension
         diff        lwr      upr     p adj
M-H  4.722222 -4.6311985 14.07564 0.4474210
L-H 14.722222  5.3688015 24.07564 0.0011218
L-M 10.000000  0.6465793 19.35342 0.0336262

> plot(TukeyHSD(fm1, "tension"))
> 
> 
> 
> cleanEx()
> nameEx("Uniform")
> ### * Uniform
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: Uniform
> ### Title: The Uniform Distribution
> ### Aliases: Uniform dunif punif qunif runif
> ### Keywords: distribution
> 
> ### ** Examples
> 
> u <- runif(20)
> 
> ## The following relations always hold :
> punif(u) == u
 [1] TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE
[16] TRUE TRUE TRUE TRUE TRUE
> dunif(u) == 1
 [1] TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE
[16] TRUE TRUE TRUE TRUE TRUE
> 
> var(runif(10000))  #- ~ = 1/12 = .08333
[1] 0.08475621
> 
> 
> 
> cleanEx()
> nameEx("Weibull")
> ### * Weibull
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: Weibull
> ### Title: The Weibull Distribution
> ### Aliases: Weibull dweibull pweibull qweibull rweibull
> ### Keywords: distribution
> 
> ### ** Examples
> 
> x <- c(0, rlnorm(50))
> all.equal(dweibull(x, shape = 1), dexp(x))
[1] TRUE
> all.equal(pweibull(x, shape = 1, scale = pi), pexp(x, rate = 1/pi))
[1] TRUE
> ## Cumulative hazard H():
> all.equal(pweibull(x, 2.5, pi, lower.tail = FALSE, log.p = TRUE),
+           -(x/pi)^2.5, tolerance = 1e-15)
[1] TRUE
> all.equal(qweibull(x/11, shape = 1, scale = pi), qexp(x/11, rate = 1/pi))
[1] TRUE
> 
> 
> 
> cleanEx()
> nameEx("Wilcoxon")
> ### * Wilcoxon
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: Wilcoxon
> ### Title: Distribution of the Wilcoxon Rank Sum Statistic
> ### Aliases: Wilcoxon dwilcox pwilcox qwilcox rwilcox
> ### Keywords: distribution
> 
> ### ** Examples
> 
> require(graphics)
> 
> x <- -1:(4*6 + 1)
> fx <- dwilcox(x, 4, 6)
> Fx <- pwilcox(x, 4, 6)
> 
> layout(rbind(1,2), widths = 1, heights = c(3,2))
> plot(x, fx, type = "h", col = "violet",
+      main =  "Probabilities (density) of Wilcoxon-Statist.(n=6, m=4)")
> plot(x, Fx, type = "s", col = "blue",
+      main =  "Distribution of Wilcoxon-Statist.(n=6, m=4)")
> abline(h = 0:1, col = "gray20", lty = 2)
> layout(1) # set back
> 
> N <- 200
> hist(U <- rwilcox(N, m = 4,n = 6), breaks = 0:25 - 1/2,
+      border = "red", col = "pink", sub = paste("N =",N))
> mtext("N * f(x),  f() = true \"density\"", side = 3, col = "blue")
>  lines(x, N*fx, type = "h", col = "blue", lwd = 2)
> points(x, N*fx, cex = 2)
> 
> ## Better is a Quantile-Quantile Plot
> qqplot(U, qw <- qwilcox((1:N - 1/2)/N, m = 4, n = 6),
+        main = paste("Q-Q-Plot of empirical and theoretical quantiles",
+                      "Wilcoxon Statistic,  (m=4, n=6)", sep = "\n"))
> n <- as.numeric(names(print(tU <- table(U))))
U
 0  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 
 1  1  4  6  7 10  8 12 14 22 13 16 13 23 16 11  7  4  4  3  3  1  1 
> text(n+.2, n+.5, labels = tU, col = "red")
> 
> 
> 
> cleanEx()
> nameEx("acf")
> ### * acf
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: acf
> ### Title: Auto- and Cross- Covariance and -Correlation Function Estimation
> ### Aliases: acf ccf pacf pacf.default [.acf
> ### Keywords: ts
> 
> ### ** Examples
> 
> require(graphics)
> 
> ## Examples from Venables & Ripley
> acf(lh)
> acf(lh, type = "covariance")
> pacf(lh)
> 
> acf(ldeaths)
> acf(ldeaths, ci.type = "ma")
> acf(ts.union(mdeaths, fdeaths))
> ccf(mdeaths, fdeaths, ylab = "cross-correlation")
> # (just the cross-correlations)
> 
> presidents # contains missing values
     Qtr1 Qtr2 Qtr3 Qtr4
1945   NA   87   82   75
1946   63   50   43   32
1947   35   60   54   55
1948   36   39   NA   NA
1949   69   57   57   51
1950   45   37   46   39
1951   36   24   32   23
1952   25   32   NA   32
1953   59   74   75   60
1954   71   61   71   57
1955   71   68   79   73
1956   76   71   67   75
1957   79   62   63   57
1958   60   49   48   52
1959   57   62   61   66
1960   71   62   61   57
1961   72   83   71   78
1962   79   71   62   74
1963   76   64   62   57
1964   80   73   69   69
1965   71   64   69   62
1966   63   46   56   44
1967   44   52   38   46
1968   36   49   35   44
1969   59   65   65   56
1970   66   53   61   52
1971   51   48   54   49
1972   49   61   NA   NA
1973   68   44   40   27
1974   28   25   24   24
> acf(presidents, na.action = na.pass)
> pacf(presidents, na.action = na.pass)
> 
> 
> 
> cleanEx()
> nameEx("acf2AR")
> ### * acf2AR
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: acf2AR
> ### Title: Compute an AR Process Exactly Fitting an ACF
> ### Aliases: acf2AR
> ### Keywords: ts
> 
> ### ** Examples
> 
> (Acf <- ARMAacf(c(0.6, 0.3, -0.2)))
        0         1         2         3 
1.0000000 0.6923077 0.5769231 0.3538462 
> acf2AR(Acf)
              1      2    3
ar(1) 0.6923077 0.0000  0.0
ar(2) 0.5625000 0.1875  0.0
ar(3) 0.6000000 0.3000 -0.2
> 
> 
> 
> cleanEx()
> nameEx("add1")
> ### * add1
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: add1
> ### Title: Add or Drop All Possible Single Terms to a Model
> ### Aliases: add1 add1.default add1.lm add1.glm drop1 drop1.default
> ###   drop1.lm drop1.glm
> ### Keywords: models
> 
> ### ** Examples
> 
> ## Don't show: 
> od <- options(digits = 5)
> ## End(Don't show)
> require(graphics); require(utils)
> ## following example(swiss)
> lm1 <- lm(Fertility ~ ., data = swiss)
> add1(lm1, ~ I(Education^2) + .^2)
Single term additions

Model:
Fertility ~ Agriculture + Examination + Education + Catholic + 
    Infant.Mortality
                             Df Sum of Sq  RSS AIC
<none>                                    2105 191
I(Education^2)                1      11.8 2093 192
Agriculture:Examination       1      10.7 2094 192
Agriculture:Education         1       1.8 2103 193
Agriculture:Catholic          1      75.0 2030 191
Agriculture:Infant.Mortality  1       4.4 2101 193
Examination:Education         1      48.7 2056 192
Examination:Catholic          1      40.8 2064 192
Examination:Infant.Mortality  1      65.9 2039 191
Education:Catholic            1     278.2 1827 186
Education:Infant.Mortality    1      93.0 2012 191
Catholic:Infant.Mortality     1       2.4 2103 193
> drop1(lm1, test = "F")  # So called 'type II' anova
Single term deletions

Model:
Fertility ~ Agriculture + Examination + Education + Catholic + 
    Infant.Mortality
                 Df Sum of Sq  RSS AIC F value  Pr(>F)    
<none>                        2105 191                    
Agriculture       1       308 2413 195    5.99  0.0187 *  
Examination       1        53 2158 190    1.03  0.3155    
Education         1      1163 3268 209   22.64 2.4e-05 ***
Catholic          1       448 2553 198    8.72  0.0052 ** 
Infant.Mortality  1       409 2514 197    7.96  0.0073 ** 
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> 
> ## following example(glm)
> ## Don't show: 
> example(glm, echo = FALSE)
  treatment outcome counts
1         1       1     18
2         1       2     17
3         1       3     15
4         2       1     20
5         2       2     10
6         2       3     20
7         3       1     25
8         3       2     13
9         3       3     12
> ## End(Don't show)
> drop1(glm.D93, test = "Chisq")
Single term deletions

Model:
counts ~ outcome + treatment
          Df Deviance  AIC  LRT Pr(>Chi)  
<none>           5.13 56.8                
outcome    2    10.58 58.2 5.45    0.065 .
treatment  2     5.13 52.8 0.00    1.000  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> drop1(glm.D93, test = "F")
Warning in drop1.glm(glm.D93, test = "F") :
  F test assumes 'quasipoisson' family
Single term deletions

Model:
counts ~ outcome + treatment
          Df Deviance  AIC F value Pr(>F)
<none>           5.13 56.8               
outcome    2    10.58 58.2    2.13   0.23
treatment  2     5.13 52.8    0.00   1.00
> add1(glm.D93, scope = ~outcome*treatment, test = "Rao") ## Pearson Chi-square
Single term additions

Model:
counts ~ outcome + treatment
                  Df Deviance  AIC Rao score Pr(>Chi)
<none>                   5.13 56.8                   
outcome:treatment  4     0.00 59.6      5.17     0.27
> ## Don't show: 
> options(od)
> ## End(Don't show)
> 
> 
> 
> cleanEx()
> nameEx("addmargins")
> ### * addmargins
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: addmargins
> ### Title: Puts Arbitrary Margins on Multidimensional Tables or Arrays
> ### Aliases: addmargins
> ### Keywords: manip array
> 
> ### ** Examples
> 
> Aye <- sample(c("Yes", "Si", "Oui"), 177, replace = TRUE)
> Bee <- sample(c("Hum", "Buzz"), 177, replace = TRUE)
> Sea <- sample(c("White", "Black", "Red", "Dead"), 177, replace = TRUE)
> (A <- table(Aye, Bee, Sea))
, , Sea = Black

     Bee
Aye   Buzz Hum
  Oui    2  11
  Si     8  13
  Yes    7   6

, , Sea = Dead

     Bee
Aye   Buzz Hum
  Oui    5  10
  Si    10  10
  Yes    7   6

, , Sea = Red

     Bee
Aye   Buzz Hum
  Oui    5   6
  Si    11   5
  Yes    7   7

, , Sea = White

     Bee
Aye   Buzz Hum
  Oui    7   9
  Si     4  12
  Yes    3   6

> addmargins(A)
, , Sea = Black

     Bee
Aye   Buzz Hum Sum
  Oui    2  11  13
  Si     8  13  21
  Yes    7   6  13
  Sum   17  30  47

, , Sea = Dead

     Bee
Aye   Buzz Hum Sum
  Oui    5  10  15
  Si    10  10  20
  Yes    7   6  13
  Sum   22  26  48

, , Sea = Red

     Bee
Aye   Buzz Hum Sum
  Oui    5   6  11
  Si    11   5  16
  Yes    7   7  14
  Sum   23  18  41

, , Sea = White

     Bee
Aye   Buzz Hum Sum
  Oui    7   9  16
  Si     4  12  16
  Yes    3   6   9
  Sum   14  27  41

, , Sea = Sum

     Bee
Aye   Buzz Hum Sum
  Oui   19  36  55
  Si    33  40  73
  Yes   24  25  49
  Sum   76 101 177

> ## Don't show: 
> stopifnot(is.table(addmargins(A)))
> ## End(Don't show)
> ftable(A)
         Sea Black Dead Red White
Aye Bee                          
Oui Buzz         2    5   5     7
    Hum         11   10   6     9
Si  Buzz         8   10  11     4
    Hum         13   10   5    12
Yes Buzz         7    7   7     3
    Hum          6    6   7     6
> ftable(addmargins(A))
         Sea Black Dead Red White Sum
Aye Bee                              
Oui Buzz         2    5   5     7  19
    Hum         11   10   6     9  36
    Sum         13   15  11    16  55
Si  Buzz         8   10  11     4  33
    Hum         13   10   5    12  40
    Sum         21   20  16    16  73
Yes Buzz         7    7   7     3  24
    Hum          6    6   7     6  25
    Sum         13   13  14     9  49
Sum Buzz        17   22  23    14  76
    Hum         30   26  18    27 101
    Sum         47   48  41    41 177
> 
> # Non-commutative functions - note differences between resulting tables:
> ftable(addmargins(A, c(1, 3),
+        FUN = list(Sum = sum, list(Min = min, Max = max))))
Margins computed over dimensions
in the following order:
1: Aye
2: Sea
         Sea Black Dead Red White Min Max
Aye Bee                                  
Oui Buzz         2    5   5     7   2   7
    Hum         11   10   6     9   6  11
Si  Buzz         8   10  11     4   4  11
    Hum         13   10   5    12   5  13
Yes Buzz         7    7   7     3   3   7
    Hum          6    6   7     6   6   7
Sum Buzz        17   22  23    14  14  23
    Hum         30   26  18    27  18  30
> ftable(addmargins(A, c(3, 1),
+        FUN = list(list(Min = min, Max = max), Sum = sum)))
Margins computed over dimensions
in the following order:
1: Sea
2: Aye
         Sea Black Dead Red White Min Max
Aye Bee                                  
Oui Buzz         2    5   5     7   2   7
    Hum         11   10   6     9   6  11
Si  Buzz         8   10  11     4   4  11
    Hum         13   10   5    12   5  13
Yes Buzz         7    7   7     3   3   7
    Hum          6    6   7     6   6   7
Sum Buzz        17   22  23    14   9  25
    Hum         30   26  18    27  17  31
> 
> # Weird function needed to return the N when computing percentages
> sqsm <- function(x) sum(x)^2/100
> B <- table(Sea, Bee)
> round(sweep(addmargins(B, 1, list(list(All = sum, N = sqsm))), 2,
+             apply(B, 2, sum)/100, "/"), 1)
       Bee
Sea      Buzz   Hum
  Black  22.4  29.7
  Dead   28.9  25.7
  Red    30.3  17.8
  White  18.4  26.7
  All   100.0 100.0
  N      76.0 101.0
> round(sweep(addmargins(B, 2, list(list(All = sum, N = sqsm))), 1,
+             apply(B, 1, sum)/100, "/"), 1)
       Bee
Sea      Buzz   Hum   All     N
  Black  36.2  63.8 100.0  47.0
  Dead   45.8  54.2 100.0  48.0
  Red    56.1  43.9 100.0  41.0
  White  34.1  65.9 100.0  41.0
> 
> # A total over Bee requires formation of the Bee-margin first:
> mB <-  addmargins(B, 2, FUN = list(list(Total = sum)))
> round(ftable(sweep(addmargins(mB, 1, list(list(All = sum, N = sqsm))), 2,
+                    apply(mB, 2, sum)/100, "/")), 1)
      Bee  Buzz   Hum Total
Sea                        
Black      22.4  29.7  26.6
Dead       28.9  25.7  27.1
Red        30.3  17.8  23.2
White      18.4  26.7  23.2
All       100.0 100.0 100.0
N          76.0 101.0 177.0
> 
> ## Zero.Printing table+margins:
> set.seed(1)
> x <- sample( 1:7, 20, replace = TRUE)
> y <- sample( 1:7, 20, replace = TRUE)
> tx <- addmargins( table(x, y) )
> print(tx, zero.print = ".")
     y
x      1  2  3  4  5  6  7 Sum
  1    .  .  1  .  .  .  .   1
  2    .  1  .  1  1  .  1   4
  3    .  2  .  .  .  1  .   3
  4    .  .  .  .  1  .  .   1
  5    .  .  1  1  1  .  1   4
  6    .  .  1  .  .  2  .   3
  7    3  .  1  .  .  .  .   4
  Sum  3  3  4  2  3  3  2  20
> 
> 
> 
> cleanEx()
> nameEx("aggregate")
> ### * aggregate
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: aggregate
> ### Title: Compute Summary Statistics of Data Subsets
> ### Aliases: aggregate aggregate.default aggregate.data.frame
> ###   aggregate.formula aggregate.ts
> ### Keywords: category array
> 
> ### ** Examples
> 
> ## Compute the averages for the variables in 'state.x77', grouped
> ## according to the region (Northeast, South, North Central, West) that
> ## each state belongs to.
> aggregate(state.x77, list(Region = state.region), mean)
         Region Population   Income Illiteracy Life Exp    Murder  HS Grad
1     Northeast   5495.111 4570.222   1.000000 71.26444  4.722222 53.96667
2         South   4208.125 4011.938   1.737500 69.70625 10.581250 44.34375
3 North Central   4803.000 4611.083   0.700000 71.76667  5.275000 54.51667
4          West   2915.308 4702.615   1.023077 71.23462  7.215385 62.00000
     Frost      Area
1 132.7778  18141.00
2  64.6250  54605.12
3 138.8333  62652.00
4 102.1538 134463.00
> 
> ## Compute the averages according to region and the occurrence of more
> ## than 130 days of frost.
> aggregate(state.x77,
+           list(Region = state.region,
+                Cold = state.x77[,"Frost"] > 130),
+           mean)
         Region  Cold Population   Income Illiteracy Life Exp    Murder
1     Northeast FALSE  8802.8000 4780.400  1.1800000 71.12800  5.580000
2         South FALSE  4208.1250 4011.938  1.7375000 69.70625 10.581250
3 North Central FALSE  7233.8333 4633.333  0.7833333 70.95667  8.283333
4          West FALSE  4582.5714 4550.143  1.2571429 71.70000  6.828571
5     Northeast  TRUE  1360.5000 4307.500  0.7750000 71.43500  3.650000
6 North Central  TRUE  2372.1667 4588.833  0.6166667 72.57667  2.266667
7          West  TRUE   970.1667 4880.500  0.7500000 70.69167  7.666667
   HS Grad    Frost      Area
1 52.06000 110.6000  21838.60
2 44.34375  64.6250  54605.12
3 53.36667 120.0000  56736.50
4 60.11429  51.0000  91863.71
5 56.35000 160.5000  13519.00
6 55.66667 157.6667  68567.50
7 64.20000 161.8333 184162.17
> ## (Note that no state in 'South' is THAT cold.)
> 
> 
> ## example with character variables and NAs
> testDF <- data.frame(v1 = c(1,3,5,7,8,3,5,NA,4,5,7,9),
+                      v2 = c(11,33,55,77,88,33,55,NA,44,55,77,99) )
> by1 <- c("red", "blue", 1, 2, NA, "big", 1, 2, "red", 1, NA, 12)
> by2 <- c("wet", "dry", 99, 95, NA, "damp", 95, 99, "red", 99, NA, NA)
> aggregate(x = testDF, by = list(by1, by2), FUN = "mean")
  Group.1 Group.2 v1 v2
1       1      95  5 55
2       2      95  7 77
3       1      99  5 55
4       2      99 NA NA
5     big    damp  3 33
6    blue     dry  3 33
7     red     red  4 44
8     red     wet  1 11
> 
> # and if you want to treat NAs as a group
> fby1 <- factor(by1, exclude = "")
> fby2 <- factor(by2, exclude = "")
> aggregate(x = testDF, by = list(fby1, fby2), FUN = "mean")
   Group.1 Group.2  v1   v2
1        1      95 5.0 55.0
2        2      95 7.0 77.0
3        1      99 5.0 55.0
4        2      99  NA   NA
5      big    damp 3.0 33.0
6     blue     dry 3.0 33.0
7      red     red 4.0 44.0
8      red     wet 1.0 11.0
9       12    <NA> 9.0 99.0
10    <NA>    <NA> 7.5 82.5
> 
> 
> ## Formulas, one ~ one, one ~ many, many ~ one, and many ~ many:
> aggregate(weight ~ feed, data = chickwts, mean)
       feed   weight
1    casein 323.5833
2 horsebean 160.2000
3   linseed 218.7500
4  meatmeal 276.9091
5   soybean 246.4286
6 sunflower 328.9167
> aggregate(breaks ~ wool + tension, data = warpbreaks, mean)
  wool tension   breaks
1    A       L 44.55556
2    B       L 28.22222
3    A       M 24.00000
4    B       M 28.77778
5    A       H 24.55556
6    B       H 18.77778
> aggregate(cbind(Ozone, Temp) ~ Month, data = airquality, mean)
  Month    Ozone     Temp
1     5 23.61538 66.73077
2     6 29.44444 78.22222
3     7 59.11538 83.88462
4     8 59.96154 83.96154
5     9 31.44828 76.89655
> aggregate(cbind(ncases, ncontrols) ~ alcgp + tobgp, data = esoph, sum)
       alcgp    tobgp ncases ncontrols
1  0-39g/day 0-9g/day      9       261
2      40-79 0-9g/day     34       179
3     80-119 0-9g/day     19        61
4       120+ 0-9g/day     16        24
5  0-39g/day    10-19     10        84
6      40-79    10-19     17        85
7     80-119    10-19     19        49
8       120+    10-19     12        18
9  0-39g/day    20-29      5        42
10     40-79    20-29     15        62
11    80-119    20-29      6        16
12      120+    20-29      7        12
13 0-39g/day      30+      5        28
14     40-79      30+      9        29
15    80-119      30+      7        12
16      120+      30+     10        13
> 
> ## Dot notation:
> aggregate(. ~ Species, data = iris, mean)
     Species Sepal.Length Sepal.Width Petal.Length Petal.Width
1     setosa        5.006       3.428        1.462       0.246
2 versicolor        5.936       2.770        4.260       1.326
3  virginica        6.588       2.974        5.552       2.026
> aggregate(len ~ ., data = ToothGrowth, mean)
  supp dose   len
1   OJ  0.5 13.23
2   VC  0.5  7.98
3   OJ  1.0 22.70
4   VC  1.0 16.77
5   OJ  2.0 26.06
6   VC  2.0 26.14
> 
> ## Often followed by xtabs():
> ag <- aggregate(len ~ ., data = ToothGrowth, mean)
> xtabs(len ~ ., data = ag)
    dose
supp   0.5     1     2
  OJ 13.23 22.70 26.06
  VC  7.98 16.77 26.14
> 
> 
> ## Compute the average annual approval ratings for American presidents.
> aggregate(presidents, nfrequency = 1, FUN = mean)
Time Series:
Start = 1945 
End = 1974 
Frequency = 1 
 [1]    NA 47.00 51.00    NA 58.50 41.75 28.75    NA 67.00 65.00 72.75 72.25
[13] 65.25 52.25 61.50 62.75 76.00 71.50 64.75 72.75 66.50 52.25 45.00 41.00
[25] 61.25 58.00 50.50    NA 44.75 25.25
> ## Give the summer less weight.
> aggregate(presidents, nfrequency = 1,
+           FUN = weighted.mean, w = c(1, 1, 0.5, 1))
Time Series:
Start = 1945 
End = 1974 
Frequency = 1 
 [1]       NA 47.57143 50.57143       NA 58.71429 41.14286 28.28571       NA
 [9] 65.85714 64.14286 71.85714 73.00000 65.57143 52.85714 61.57143 63.00000
[17] 76.71429 72.85714 65.14286 73.28571 66.14286 51.71429 46.00000 41.85714
[25] 60.71429 57.57143 50.00000       NA 45.42857 25.42857
> 
> 
> 
> cleanEx()
> nameEx("alias")
> ### * alias
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: alias
> ### Title: Find Aliases (Dependencies) in a Model
> ### Aliases: alias alias.formula alias.lm
> ### Keywords: models
> 
> ### ** Examples
> 
> 
> cleanEx()
> nameEx("anova.glm")
> ### * anova.glm
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: anova.glm
> ### Title: Analysis of Deviance for Generalized Linear Model Fits
> ### Aliases: anova.glm
> ### Keywords: models regression
> 
> ### ** Examples
> 
> ## --- Continuing the Example from  '?glm':
> ## Don't show: 
> require(utils)
> example("glm", echo = FALSE)
  treatment outcome counts
1         1       1     18
2         1       2     17
3         1       3     15
4         2       1     20
5         2       2     10
6         2       3     20
7         3       1     25
8         3       2     13
9         3       3     12
> ## End(Don't show)
> anova(glm.D93)
Analysis of Deviance Table

Model: poisson, link: log

Response: counts

Terms added sequentially (first to last)


          Df Deviance Resid. Df Resid. Dev
NULL                          8    10.5814
outcome    2   5.4523         6     5.1291
treatment  2   0.0000         4     5.1291
> anova(glm.D93, test = "Cp")
Analysis of Deviance Table

Model: poisson, link: log

Response: counts

Terms added sequentially (first to last)


          Df Deviance Resid. Df Resid. Dev     Cp
NULL                          8    10.5814 12.581
outcome    2   5.4523         6     5.1291 11.129
treatment  2   0.0000         4     5.1291 15.129
> anova(glm.D93, test = "Chisq")
Analysis of Deviance Table

Model: poisson, link: log

Response: counts

Terms added sequentially (first to last)


          Df Deviance Resid. Df Resid. Dev Pr(>Chi)  
NULL                          8    10.5814           
outcome    2   5.4523         6     5.1291  0.06547 .
treatment  2   0.0000         4     5.1291  1.00000  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> glm.D93a <-
+    update(glm.D93, ~treatment*outcome) # equivalent to Pearson Chi-square
> anova(glm.D93, glm.D93a, test = "Rao")
Analysis of Deviance Table

Model 1: counts ~ outcome + treatment
Model 2: counts ~ treatment + outcome + treatment:outcome
  Resid. Df Resid. Dev Df Deviance    Rao Pr(>Chi)
1         4     5.1291                            
2         0     0.0000  4   5.1291 5.1732     0.27
> 
> 
> 
> cleanEx()
> nameEx("anova.lm")
> ### * anova.lm
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: anova.lm
> ### Title: ANOVA for Linear Model Fits
> ### Aliases: anova.lm anova.lmlist
> ### Keywords: regression models
> 
> ### ** Examples
> 
> ## sequential table
> fit <- lm(sr ~ ., data = LifeCycleSavings)
> anova(fit)
Analysis of Variance Table

Response: sr
          Df Sum Sq Mean Sq F value    Pr(>F)    
pop15      1 204.12 204.118 14.1157 0.0004922 ***
pop75      1  53.34  53.343  3.6889 0.0611255 .  
dpi        1  12.40  12.401  0.8576 0.3593551    
ddpi       1  63.05  63.054  4.3605 0.0424711 *  
Residuals 45 650.71  14.460                      
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> 
> ## same effect via separate models
> fit0 <- lm(sr ~ 1, data = LifeCycleSavings)
> fit1 <- update(fit0, . ~ . + pop15)
> fit2 <- update(fit1, . ~ . + pop75)
> fit3 <- update(fit2, . ~ . + dpi)
> fit4 <- update(fit3, . ~ . + ddpi)
> anova(fit0, fit1, fit2, fit3, fit4, test = "F")
Analysis of Variance Table

Model 1: sr ~ 1
Model 2: sr ~ pop15
Model 3: sr ~ pop15 + pop75
Model 4: sr ~ pop15 + pop75 + dpi
Model 5: sr ~ pop15 + pop75 + dpi + ddpi
  Res.Df    RSS Df Sum of Sq       F    Pr(>F)    
1     49 983.63                                   
2     48 779.51  1   204.118 14.1157 0.0004922 ***
3     47 726.17  1    53.343  3.6889 0.0611255 .  
4     46 713.77  1    12.401  0.8576 0.3593551    
5     45 650.71  1    63.054  4.3605 0.0424711 *  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> 
> anova(fit4, fit2, fit0, test = "F") # unconventional order
Analysis of Variance Table

Model 1: sr ~ pop15 + pop75 + dpi + ddpi
Model 2: sr ~ pop15 + pop75
Model 3: sr ~ 1
  Res.Df    RSS Df Sum of Sq      F    Pr(>F)    
1     45 650.71                                  
2     47 726.17 -2   -75.455 2.6090 0.0847088 .  
3     49 983.63 -2  -257.460 8.9023 0.0005527 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> 
> 
> 
> cleanEx()
> nameEx("anova.mlm")
> ### * anova.mlm
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: anova.mlm
> ### Title: Comparisons between Multivariate Linear Models
> ### Aliases: anova.mlm
> ### Keywords: regression models multivariate
> 
> ### ** Examples
> 
> require(graphics)
> utils::example(SSD) # Brings in the mlmfit and reacttime objects

SSD> # Lifted from Baron+Li:
SSD> # "Notes on the use of R for psychology experiments and questionnaires"
SSD> # Maxwell and Delaney, p. 497
SSD> reacttime <- matrix(c(
SSD+ 420, 420, 480, 480, 600, 780,
SSD+ 420, 480, 480, 360, 480, 600,
SSD+ 480, 480, 540, 660, 780, 780,
SSD+ 420, 540, 540, 480, 780, 900,
SSD+ 540, 660, 540, 480, 660, 720,
SSD+ 360, 420, 360, 360, 480, 540,
SSD+ 480, 480, 600, 540, 720, 840,
SSD+ 480, 600, 660, 540, 720, 900,
SSD+ 540, 600, 540, 480, 720, 780,
SSD+ 480, 420, 540, 540, 660, 780),
SSD+ ncol = 6, byrow = TRUE,
SSD+ dimnames = list(subj = 1:10,
SSD+               cond = c("deg0NA", "deg4NA", "deg8NA",
SSD+                        "deg0NP", "deg4NP", "deg8NP")))

SSD> mlmfit <- lm(reacttime ~ 1)

SSD> SSD(mlmfit)
$SSD
        cond
cond     deg0NA deg4NA deg8NA deg0NP deg4NP deg8NP
  deg0NA  29160  30600  26640  23760  32400  25560
  deg4NA  30600  66600  32400   7200  36000  30600
  deg8NA  26640  32400  56160  41040  57600  69840
  deg0NP  23760   7200  41040  70560  72000  63360
  deg4NP  32400  36000  57600  72000 108000 100800
  deg8NP  25560  30600  69840  63360 100800 122760

$call
lm(formula = reacttime ~ 1)

$df
[1] 9

attr(,"class")
[1] "SSD"

SSD> estVar(mlmfit)
        cond
cond     deg0NA deg4NA deg8NA deg0NP deg4NP deg8NP
  deg0NA   3240   3400   2960   2640   3600   2840
  deg4NA   3400   7400   3600    800   4000   3400
  deg8NA   2960   3600   6240   4560   6400   7760
  deg0NP   2640    800   4560   7840   8000   7040
  deg4NP   3600   4000   6400   8000  12000  11200
  deg8NP   2840   3400   7760   7040  11200  13640
> 
> mlmfit0 <- update(mlmfit, ~0)
> 
> ### Traditional tests of intrasubj. contrasts
> ## Using MANOVA techniques on contrasts:
> anova(mlmfit, mlmfit0, X = ~1)
Analysis of Variance Table

Model 1: reacttime ~ 1
Model 2: reacttime ~ 1 - 1

Contrasts orthogonal to
~1

  Res.Df Df Gen.var. Pillai approx F num Df den Df   Pr(>F)   
1      9      1249.6                                          
2     10  1   2013.2 0.9456   17.381      5      5 0.003534 **
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> 
> ## Assuming sphericity
> anova(mlmfit, mlmfit0, X = ~1, test = "Spherical")
Analysis of Variance Table

Model 1: reacttime ~ 1
Model 2: reacttime ~ 1 - 1

Contrasts orthogonal to
~1

Greenhouse-Geisser epsilon: 0.4855
Huynh-Feldt epsilon:        0.6778

  Res.Df Df Gen.var.      F num Df den Df     Pr(>F)    G-G Pr    H-F Pr
1      9      1249.6                                                    
2     10  1   2013.2 38.028      5     45 4.4711e-15 2.532e-08 7.393e-11
> 
> 
> ### tests using intra-subject 3x2 design