Introduction

Vignettes are long-form documentation commonly included in packages. Because they are part of the distribution of the package, they need to be as compact as possible. The html_vignette output type provides a custom style sheet (and tweaks some options) to ensure that the resulting HTML is as small as possible. The html_vignette format:

  • Never uses retina figures
  • Has a smaller default figure size
  • Uses a custom CSS stylesheet instead of the default Twitter Bootstrap style
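
A vignette using this format typically declares it in its YAML header; the title and index entry below are illustrative placeholders:

---
title: "An example vignette"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{An example vignette}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---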

Basic usage

library(calibrar)
calibrate(par=rep(NA, 5), fn=sphereN)
#> 
#> Phase 1 finished in 0.59s (5 of 5 parameters active)
#> Function value: 0.0560202
#> Parameter values: -0.00721 0.00125 0.047 -0.000134 0.00891
library(calibrar)
calibrate(par=rep(NA, 5), fn=sphereN, replicates=3)
#> 
#> Phase 1 finished in 0.45s (5 of 5 parameters active)
#> Function value: 0.0583132
#> Parameter values: 0.0231 0.0308 -0.022 -0.00235 -0.0596
library(calibrar)
calibrate(par=rep(0.5, 5), fn=sphereN, replicates=3, lower=-5, upper=5)
#> Warning in .checkBounds(lower = lower, upper = upper, npar = npar): Only one
#> lower bound has been provided, used for all parameters.
#> Warning in .checkBounds(lower = lower, upper = upper, npar = npar): Only one
#> upper bound has been provided, used for all parameters.
#> 
#> Phase 1 finished in 0.41s (5 of 5 parameters active)
#> Function value: 0.0631647
#> Parameter values: -0.0338 -0.041 -0.0246 0.0391 -0.0461
calibrate(par=rep(0.5, 5), fn=sphereN, replicates=c(1,1,4), lower=-5, upper=5, phases=c(1,1,1,2,3))
#> Warning in .checkBounds(lower = lower, upper = upper, npar = npar): Only one
#> lower bound has been provided, used for all parameters.
#> Warning in .checkBounds(lower = lower, upper = upper, npar = npar): Only one
#> upper bound has been provided, used for all parameters.
#> 
#> Phase 1 finished in 0.33s (3 of 5 parameters active)
#> Function value: 0.66303
#> Parameter values: 0.116 0.262 -0.096
#> 
#> Phase 2 finished in 0.59s (4 of 5 parameters active)
#> Function value: 0.166406
#> Parameter values: 0.0521 0.0534 -0.00815 0.0613
#> 
#> Phase 3 finished in 0.52s (5 of 5 parameters active)
#> Function value: 0.0538903
#> Parameter values: -0.0368 -0.0418 0.0072 0.0409 0.0305
# this calibration saves results to disk for restart purposes
calibrate(par=rep(0.5, 5), fn=sphereN, replicates=3, lower=-5, upper=5, phases=c(1,1,1,2,3), control=list(restart.file="sphere"))
#> Warning in .checkBounds(lower = lower, upper = upper, npar = npar): Only one
#> lower bound has been provided, used for all parameters.
#> Warning in .checkBounds(lower = lower, upper = upper, npar = npar): Only one
#> upper bound has been provided, used for all parameters.
#> 
#> Phase 1 finished in 0.46s (3 of 5 parameters active)
#> Function value: 0.590158
#> Parameter values: -0.00918 0.0282 -0.00396
#> 
#> Phase 2 finished in 0.61s (4 of 5 parameters active)
#> Function value: 0.428713
#> Parameter values: 0.165 0.0403 0.129 -0.0913
#> 
#> Phase 3 finished in 0.46s (5 of 5 parameters active)
#> Function value: 0.0375388
#> Parameter values: 0.00908 -0.0176 -0.0572 0.0555 -0.0311
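
If such a run is interrupted, repeating the same call with the same restart.file is intended to resume the calibration from the last state saved on disk instead of starting from scratch (a sketch of the workflow; see ?calibrate for the exact restart behaviour):

# the restart file "sphere" written by the previous run is picked up again
calibrate(par=rep(0.5, 5), fn=sphereN, replicates=3, lower=-5, upper=5,
          phases=c(1,1,1,2,3), control=list(restart.file="sphere"))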

Running in parallel

library(calibrar)
library(doSNOW)
set.seed(880820)
ncores = 6 # number of cores to be used
cl = makeCluster(ncores, type="SOCK")
registerDoSNOW(cl) # register the parallel backend
# this is slower than sequential for very fast models (like this one)
calib = calibrate(par=rep(0.5, 5), fn=sphereN, 
                  replicates=3, 
                  lower=rep(-5, 5), 
                  upper=rep(+5, 5), 
                  phases=c(1,1,1,2,3), 
                  control=list(parallel=TRUE, ncores=ncores))
stopCluster(cl) # close the parallel connections
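
The doSNOW backend shown above is only one option: any foreach-compatible parallel backend registered before the call should behave the same way. The sketch below uses the doParallel package instead (assuming it is installed), with the same calibration settings:

library(doParallel)
cl = makeCluster(ncores)   # create a local cluster
registerDoParallel(cl)     # register the parallel backend
calib = calibrate(par=rep(0.5, 5), fn=sphereN,
                  replicates=3,
                  lower=rep(-5, 5),
                  upper=rep(+5, 5),
                  phases=c(1,1,1,2,3),
                  control=list(parallel=TRUE, ncores=ncores))
stopCluster(cl)            # close the parallel connections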

Optimization algorithms available

Benchmarking parameter estimation for a linear model

We benchmark the estimation of the parameters of a simple linear model, \(Y = X\beta + \epsilon\), comparing several of the optimization methods available through calibrate() against the lm() fit.

N = 7 # number of variables in the linear model
T = 200 # number of observations
noise = FALSE # add gaussian noise to the model
shift = FALSE # add a random shift to the slopes
sd = runif(1) # standard deviation of the gaussian noise
# observed data
x = t(matrix(rnorm(N*T, sd=sd), nrow=N, ncol=T))
# slopes for the linear model (real parameters)
slope = seq_len(N) + shift*sample(c(-10, 10), N, replace=TRUE)
# intercept for the linear model (real parameters)
intercept = pi
# real parameters
real = list(intercept=intercept, slope=slope)
# function to simulate the linear model
linear = function(x, par) {
  stopifnot(length(x)==length(par$slope))
  out = sum(x*par$slope) + par$intercept
  return(out)
}
# simulated data
y = apply(x, 1, linear, par=real) + noise*rnorm(nrow(x), sd=mean(sd))
# objective function (residual squares sum)
obj = function(par, x, y) {
  y_sim = apply(x, 1, linear, par=par)
  out = sum((y_sim - y)^2)
  return(out)
}
# initial guess for optimization
start = list(intercept=0, slope=rep(0, N))
# box constraints (used by the constrained methods)
lower = relist(rep(-10, N+1), skeleton=start)
upper = relist(rep(+10, N+1), skeleton=start)
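# quick sanity check (illustrative, not part of the original benchmark): with
# noise = FALSE the objective evaluated at the true parameters is exactly zero
obj(par=real, x=x, y=y)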
cat("Running optimization algorithms\n")
cat("\t", date(), "\n")

cat("Running linear model\n")
mod = lm(y ~ x)

cat("Running calibrar AHR-ES (unconstrained)\n")
es  = calibrate(par=start, fn=obj, x=x, y=y)
#> 
#> Phase 1 finished in 5.62s (8 of 8 parameters active)
#> Function value: 2.25172e-17
#> Parameter values: 3.14 1 2 3 4 5 6 7

cat("Running calibrar AHR-ES (constrained)\n")
es2 = calibrate(par=start, fn=obj, x=x, y=y, 
                           lower=lower, upper=upper)
#> 
#> Phase 1 finished in 4.40s (8 of 8 parameters active)
#> Function value: 4.32381e-14
#> Parameter values: 3.14 1 2 3 4 5 6 7

cat("Running optim CG\n")
cg  = calibrate(par=start, fn=obj, x=x, y=y, method="Rcgmin")
#> 
#> Phase 1 finished in 0.58s (8 of 8 parameters active)
#> Function value: 1.20587e-12
#> Parameter values: 3.14 1 2 3 4 5 6 7

cat("Running optim SANN\n")
sann = calibrate(par=start, fn=obj, x=x, y=y, method="SANN")
#> 
#> Phase 1 finished in 2m 52.1s (8 of 8 parameters active)
#> Function value: 1982.7
#> Parameter values: 0 0 0 0 0 0 0 0

cat("Running optimx Nelder-Mead\n")
nm = calibrate(par=start, fn=obj, x=x, y=y, method="Nelder-Mead")
#> 
#> Phase 1 finished in 0.65s (8 of 8 parameters active)
#> Function value: 11.1941
#> Parameter values: 3.14 -1.37 -2.42 -0.742 -0.43 -2.32 1.37 1.57

cat("Running optimx BFGS\n")
bfgs = calibrate(par=start, fn=obj, x=x, y=y, method="L-BFGS-B")
#> 
#> Phase 1 finished in 0.27s (8 of 8 parameters active)
#> Function value: 3.54439e-08
#> Parameter values: 3.14 1 2 3 4 5 6 7

cat("Running cmaes CMA-ES\n")
cma = calibrate(par=start, fn=obj, x=x, y=y, 
                                   lower=lower, upper=upper, method="cmaes")
#> 
#> Phase 1 finished in 1m 31.9s (8 of 8 parameters active)
#> Function value: 7.88861e-31
#> Parameter values: 3.14 1 2 3 4 5 6 7
final = rbind(real=unlist(real),
              lm=coef(mod),
              'BFGS'        = unlist(bfgs$par),
              'CG'          = unlist(cg$par),
              'AHR-ES'      = unlist(es$par),
              'AHR-ES (constrained)' = unlist(es2$par),
              'SANN'        = unlist(sann$par),
              'CMA-ES'      = unlist(cma$par),
              'Nelder-Mead' = unlist(nm$par))
                      intercept     slope1    slope2     slope3     slope4    slope5   slope6   slope7
real                   3.141593  1.0000000  2.000000  3.0000000  4.0000000  5.000000 6.000000 7.000000
lm                     3.141593  1.0000000  2.000000  3.0000000  4.0000000  5.000000 6.000000 7.000000
BFGS                   3.141593  0.9999929  1.999589  2.9996429  3.9998612  4.999638 5.999748 6.999776
CG                     3.141593  1.0000006  1.999997  2.9999991  4.0000008  4.999998 6.000000 7.000000
AHR-ES                 3.141593  1.0000000  2.000000  3.0000000  4.0000000  5.000000 6.000000 7.000000
AHR-ES (constrained)   3.141593  1.0000001  2.000000  3.0000001  4.0000001  5.000000 6.000001 7.000000
SANN                   0.000000  0.0000000  0.000000  0.0000000  0.0000000  0.000000 0.000000 0.000000
CMA-ES                 3.141593  1.0000000  2.000000  3.0000000  4.0000000  5.000000 6.000000 7.000000
Nelder-Mead            3.137008 -1.3728057 -2.420156 -0.7420178 -0.4299983 -2.318960 1.371857 1.570374
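
As a quick numeric summary of the table above (an illustrative check, not part of the original benchmark), the Euclidean distance of each estimate from the true parameter vector can be computed directly from final:

# distance of each method's estimate from the true parameter vector
sort(apply(final, 1, function(p) sqrt(sum((p - unlist(real))^2))))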

Worked examples

Fitting an ODE system: the Lotka-Volterra Predator-Prey model

Fitting an ODE system: the SIR epidemiological model

Fitting an autoregressive Poisson model

Fitting a stochastic model: an individual-based (IBM) Lotka-Volterra model

Figures

The figure sizes have been customised so that you can easily put two images side-by-side.

plot(1:10)
plot(10:1)

You can enable figure captions by setting fig_caption: yes in the YAML:

output:
  rmarkdown::html_vignette:
    fig_caption: yes

Then you can use the chunk option fig.cap = "Your figure caption." in knitr.
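
For example, a chunk declared as follows (the caption text is a placeholder) would be rendered with a caption:

```{r, fig.cap = "Your figure caption."}
plot(1:10)
```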

Also a quote using >:

“He who gives up code safety for code speed deserves neither.”

