From 6e7812f746d153e399439e2c55bef024ac980c4b Mon Sep 17 00:00:00 2001
From: wbonat <wbonat@gmail.com>
Date: Mon, 4 Jan 2016 18:42:45 +0100
Subject: [PATCH] Fix indentation in examples

---
 Examples/Examples1.R   | 266 +++++++++++++++++++++++++----------------
 Examples/GLMExamples.R | 171 +++++++++++++++-----------
 2 files changed, 267 insertions(+), 170 deletions(-)

diff --git a/Examples/Examples1.R b/Examples/Examples1.R
index d1b6803..d1ab22b 100755
--- a/Examples/Examples1.R
+++ b/Examples/Examples1.R
@@ -1,67 +1,80 @@
-# Set of examples 1 - Simulated univariate models ------------------------------
-# Author: Wagner Hugo Bonat LEG/IMADA ------------------------------------------
-# Date: 07/08/2015 -------------------------------------------------------------
-# Lastest updated: 28/08/2015 --------------------------------------------------
-#-------------------------------------------------------------------------------
+# Set of examples 1 - Univariate models --------------------------------
+# Author: Wagner Hugo Bonat LEG/IMADA ----------------------------------
+# Date: 07/08/2015 -----------------------------------------------------
+# Last updated: 28/08/2015 ---------------------------------------------
+#-----------------------------------------------------------------------
 rm(list=ls())
 
-# Loading extra package --------------------------------------------------------
+# Loading extra package ------------------------------------------------
 require(mcglm)
+require(Matrix)
 require(tweedie)
 require(dplyr)
 require(mvtnorm)
 
-# Setting the seed -------------------------------------------------------------
+# Setting the seed -----------------------------------------------------
 set.seed(2503)
 
-# Case 1 - Linear regression model ---------------------------------------------
+# Case 1 - Linear regression model -------------------------------------
 covariate <- seq(-1,1, l = 100)
 X <- model.matrix(~ covariate)
-mu1 <- mc_link_function(beta = c(1,0.8), X = X, offset = NULL, link = "identity")
+mu1 <- mc_link_function(beta = c(1,0.8), X = X, offset = NULL, 
+                        link = "identity")
 y1 <- rnorm(100, mu1$mu, sd = 0.5)
 Z0 <- Diagonal(100, 1)
 data <- data.frame("y1" = y1, "covariate" = covariate)
 
-# Linear Regression model -------------------------------------------------------
-fit1.id <- mcglm(linear_pred = c(y1 ~ covariate), matrix_pred = list("resp1" = list(Z0)),
-                 data = data, control_algorithm = list("correct" = FALSE, verbose = FALSE))
+# Linear Regression model ----------------------------------------------
+fit1.id <- mcglm(linear_pred = c(y1 ~ covariate), 
+                 matrix_pred = list("resp1" = list(Z0)),
+                 data = data, 
+                 control_algorithm = list("correct" = FALSE, 
+                                          "verbose" = FALSE))
 summary(fit1.id)
 
-# Using inverse covariance link function -----------------------------------------
-fit1.inv <- mcglm(linear_pred = c(y1 ~ covariate), matrix_pred = list("resp1" = list(Z0)),
+# Using the inverse covariance link function ---------------------------
+fit1.inv <- mcglm(linear_pred = c(y1 ~ covariate), 
+                  matrix_pred = list("resp1" = list(Z0)),
                   covariance = "inverse", data = data,
-                  control_algorithm = list(verbose = FALSE, "correct" = FALSE))
+                  control_algorithm = list("verbose" = FALSE, 
+                                           "correct" = FALSE))
 summary(fit1.inv)
 
-# Using exponential-matrix covariance link function -------------------------------
-fit1.expm <- mcglm(linear_pred = c(y1 ~ covariate), matrix_pred = list("resp1" = list(Z0)),
+# Using the exponential-matrix covariance link function ----------------
+fit1.expm <- mcglm(linear_pred = c(y1 ~ covariate), 
+                   matrix_pred = list("resp1" = list(Z0)),
                    covariance = "expm", data = data,
-                   control_algorithm = list(verbose = FALSE, "correct" = FALSE))
+                   control_algorithm = list("verbose" = FALSE, 
+                                            "correct" = FALSE))
 summary(fit1.expm)
 
-# Comparing tau estimates using diferent covariance link functions ----------------
+# Comparing estimates of tau using different covariance link functions -
 coef(fit1.id, type = "tau")$Estimates
 1/coef(fit1.inv, type = "tau")$Estimates
 exp(coef(fit1.expm, type = "tau")$Estimates)
 
-# Case 2 - Linear regression model with heterocedastic errors ---------------------
+# Case 2 - Linear regression model with heteroscedasticity -------------
 covariate <- seq(-1,1, l = 100)
 X <- model.matrix(~ covariate)
-mu1 <- mc_link_function(beta = c(1,0.8), X = X, offset = NULL, link = "identity")
+mu1 <- mc_link_function(beta = c(1,0.8), X = X, offset = NULL, 
+                        link = "identity")
 Z0 <- Diagonal(100, 1)
 Z1 <- Diagonal(100, c(rep(0,50),rep(1,50)))
 Sigma <- mc_matrix_linear_predictor(tau = c(0.2, 0.15), Z = list(Z0,Z1))
 y1 <- rnorm(100, mu1$mu, sd = sqrt(diag(Sigma)))
 data <- data.frame("y1" = y1, "covariate" = covariate)
 
-# Fitting using identity covariance function --------------------------------------
-fit2.id <- mcglm(linear_pred = c(y1 ~ covariate), matrix_pred = list("resp1" = list(Z0,Z1)), data = data)
+# Fitting using identity covariance function ---------------------------
+fit2.id <- mcglm(linear_pred = c(y1 ~ covariate), 
+                 matrix_pred = list("resp1" = list(Z0,Z1)), 
+                 data = data)
 summary(fit2.id)
 
-# Case 3 - Longitudinal model using compound symmetry ------------------------------
+# Case 3 - Longitudinal model using compound symmetry ------------------
 covariate <- seq(-1,1, l = 200)
 X <- model.matrix(~ covariate)
-mu1 <- mc_link_function(beta = c(1,0.8), X = X, offset = NULL, link = "identity")
+mu1 <- mc_link_function(beta = c(1,0.8), X = X, offset = NULL, 
+                        link = "identity")
 Z0 <- Diagonal(200, 1)
 Z1.temp <- Matrix(rep(1,10)%*%t(rep(1,10)))
 Z1.list <- list()
@@ -71,194 +84,243 @@ Sigma <- mc_matrix_linear_predictor(tau = c(0.2, 0.15), Z = list(Z0,Z1))
 y1 <- as.numeric(rmvnorm(1, mean = mu1$mu, sigma = as.matrix(Sigma)))
 data <- data.frame("y1" = y1, "covariate" = covariate)
 
-# Fitting using identity covariance function --------------------------------------
-fit3.id <- mcglm(linear_pred = c(y1 ~ covariate), matrix_pred = list("resp1" = list(Z0,Z1)), data = data)
+# Fitting using identity covariance function ---------------------------
+fit3.id <- mcglm(linear_pred = c(y1 ~ covariate), 
+                 matrix_pred = list("resp1" = list(Z0,Z1)), data = data)
 summary(fit3.id)
 
-# Fitting using exponential-matrix covariance function ----------------------------
-fit3.expm <- mcglm(linear_pred = c(y1 ~ covariate), matrix_pred = list("resp1" = list(Z0,Z1)),
-                  covariance = "expm", data = data)
+# Fitting using exponential-matrix covariance function -----------------
+fit3.expm <- mcglm(linear_pred = c(y1 ~ covariate), 
+                   matrix_pred = list("resp1" = list(Z0,Z1)),
+                   covariance = "expm", data = data)
 summary(fit3.expm)
 
-# Case 4 - Logistic regression model ----------------------------------------------
+# Case 4 - Logistic regression model -----------------------------------
 covariate <- seq(-1,1, l = 250)
 X <- model.matrix(~ covariate)
-mu1 <- mc_link_function(beta = c(1,0.8), X = X, offset = NULL, link = "logit")
+mu1 <- mc_link_function(beta = c(1,0.8), X = X, offset = NULL, 
+                        link = "logit")
 Z0 <- Diagonal(250, 1)
 y1 <- rbinom(250, prob = mu1$mu, size = 10)/10
 data <- data.frame("y1" = y1, "covariate" = covariate)
 
-# Logit link function -------------------------------------------------------------
-fit4.logit <- mcglm(linear_pred = c(y1 ~ covariate), matrix_pred = list("resp1" = list(Z0)),
-                    link = "logit", variance = "binomialP", Ntrial = list(rep(10,250)), data = data)
+# Logit link function --------------------------------------------------
+fit4.logit <- mcglm(linear_pred = c(y1 ~ covariate), 
+                    matrix_pred = list("resp1" = list(Z0)),
+                    link = "logit", variance = "binomialP", 
+                    Ntrial = list(rep(10,250)), data = data)
 summary(fit4.logit)
 
-# Probit link function -------------------------------------------------------------
-fit4.probit <- mcglm(linear_pred = c(y1 ~ covariate), matrix_pred = list("resp1" = list(Z0)),
-                     link = "probit", variance = "binomialP", Ntrial = list(rep(10,250)), data = data)
+# Probit link function -------------------------------------------------
+fit4.probit <- mcglm(linear_pred = c(y1 ~ covariate), 
+                     matrix_pred = list("resp1" = list(Z0)),
+                     link = "probit", variance = "binomialP", 
+                     Ntrial = list(rep(10,250)), data = data)
 summary(fit4.probit)
 
-# Cauchit link function ------------------------------------------------------------
-fit4.cauchit <- mcglm(linear_pred = c(y1 ~ covariate), matrix_pred = list("resp1" = list(Z0)),
-                      link = "cauchit", variance = "binomialP", Ntrial = list(rep(10,250)), data = data)
+# Cauchit link function ------------------------------------------------
+fit4.cauchit <- mcglm(linear_pred = c(y1 ~ covariate), 
+                      matrix_pred = list("resp1" = list(Z0)),
+                      link = "cauchit", variance = "binomialP", 
+                      Ntrial = list(rep(10,250)), data = data)
 summary(fit4.cauchit)
 
-# Cloglog link function ------------------------------------------------------------
-fit4.cloglog <- mcglm(linear_pred = c(y1 ~ covariate), matrix_pred = list("resp1" = list(Z0)),
-                      link = "cloglog", variance = "binomialP", Ntrial = list(rep(10,250)), data = data)
+# Cloglog link function ------------------------------------------------
+fit4.cloglog <- mcglm(linear_pred = c(y1 ~ covariate), 
+                      matrix_pred = list("resp1" = list(Z0)),
+                      link = "cloglog", variance = "binomialP", 
+                      Ntrial = list(rep(10,250)), data = data)
 summary(fit4.cloglog)
 
-# loglog link function --------------------------------------------------------------
-fit4.loglog <- mcglm(linear_pred = c(y1 ~ covariate), matrix_pred = list("resp1" = list(Z0)),
-                     link = "loglog", variance = "binomialP", Ntrial = list(rep(10,250)), data = data)
+# loglog link function -------------------------------------------------
+fit4.loglog <- mcglm(linear_pred = c(y1 ~ covariate), 
+                     matrix_pred = list("resp1" = list(Z0)),
+                     link = "loglog", variance = "binomialP", 
+                     Ntrial = list(rep(10,250)), data = data)
 summary(fit4.loglog)
 
-# Example 5 - Logistic regression with extra power parameter in the variance function
-fit5 <- mcglm(linear_pred = c(y1 ~ covariate), matrix_pred = list("resp1" = list(Z0)),
-              link = "logit", variance = "binomialP", Ntrial = list(rep(10,250)),
+# Case 5 - Logistic regression with extra power parameter --------------
+fit5 <- mcglm(linear_pred = c(y1 ~ covariate), 
+              matrix_pred = list("resp1" = list(Z0)),
+              link = "logit", variance = "binomialP", 
+              Ntrial = list(rep(10,250)),
               power_fixed = list(FALSE), data = data)
-
 summary(fit5)
 
-# Example 6 - Logistic regression with two extra power parameters in the variance function
-# This model can be very hard to fit and require very carefull initial values and tunning.
-fit6 <- mcglm(linear_pred = c(y1 ~ covariate), matrix_pred = list("resp1" = list(Z0)),
-              link = "logit", variance = "binomialPQ", Ntrial = list(rep(10,250)),
+# Case 6 - Logistic regression with two extra power parameters ---------
+# This model can be very hard to fit and requires very careful
+# initial values and tuning.
+fit6 <- mcglm(linear_pred = c(y1 ~ covariate), 
+              matrix_pred = list("resp1" = list(Z0)),
+              link = "logit", variance = "binomialPQ", 
+              Ntrial = list(rep(10,250)),
               power_fixed = list(FALSE), data = data,
-              control_algorithm = list("method" = "chaser", "tunning" = 0.1,
-                                       "max_iter" = 1000, verbose = FALSE))
+              control_algorithm = list("method" = "chaser", 
+                                       "tunning" = 0.1,
+                                       "max_iter" = 1000, 
+                                       "verbose" = FALSE))
 summary(fit6)
 plot(fit6, type = "algorithm")
 
-# Case 7 - Gamma regression model -------------------------------------------------
+# Case 7 - Gamma regression model --------------------------------------
 covariate <- seq(-1,1, l = 100)
 X <- model.matrix(~ covariate)
-mu1 <- mc_link_function(beta = c(1,0.8), X = X, offset = NULL, link = "log")
+mu1 <- mc_link_function(beta = c(1,0.8), X = X, offset = NULL, 
+                        link = "log")
 Z0 <- Diagonal(100, 1)
 y1 <- rtweedie(100, mu = mu1$mu, power = 2, phi = 0.5)
 data <- data.frame("y1" = y1, "covariate" = covariate)
 
-# Initial values -------------------------------------------------------------------
+# Initial values -------------------------------------------------------
 list_initial = list()
 list_initial$regression <- list("resp1" = c(1,0))
 list_initial$power <- list("resp1" = c(2))
 list_initial$tau <- list("resp1" = c(0.1))
 list_initial$rho = 0
 
-# Power parameter fixed -------------------------------------------------------------
-fit7 <- mcglm(linear_pred = c(y1 ~ covariate), matrix_pred = list("resp1" = list(Z0)),
-              link = "log", variance = "tweedie", power_fixed = list(TRUE),
+# Power parameter fixed ------------------------------------------------
+fit7 <- mcglm(linear_pred = c(y1 ~ covariate), 
+              matrix_pred = list("resp1" = list(Z0)),
+              link = "log", variance = "tweedie", 
+              power_fixed = list(TRUE),
               control_initial = list_initial, data = data)
 summary(fit7)
 plot(fit7, type = "algorithm")
 
-# Estimating the power parameter ----------------------------------------------------
-fit7.power <- mcglm(linear_pred = c(y1 ~ covariate), matrix_pred = list("resp1" = list(Z0)),
-              link = "log", variance = "tweedie", power_fixed = FALSE, data = data)
+# Estimating the power parameter ---------------------------------------
+fit7.power <- mcglm(linear_pred = c(y1 ~ covariate), 
+                    matrix_pred = list("resp1" = list(Z0)),
+                    link = "log", variance = "tweedie",
+                    control_initial = list_initial,
+                    power_fixed = FALSE, data = data)
 summary(fit7.power)
 
-# Case 8 - Inverse Gaussian regression model ----------------------------------------
+# Case 8 - Inverse Gaussian regression model ---------------------------
 covariate <- seq(-2,2, l = 200)
 X <- model.matrix(~ covariate)
-mu1 <- mc_link_function(beta = c(1,0.8), X = X, offset = NULL, link = "log")
+mu1 <- mc_link_function(beta = c(1,0.8), X = X, offset = NULL, 
+                        link = "log")
 Z0 <- Diagonal(200, 1)
 y1 <- rtweedie(200, mu = mu1$mu, power = 3, phi = 0.5)
 data <- data.frame("y1" = y1, "covariate" = covariate)
 
-# Initial values list ----------------------------------------------------------------
+# Initial values list --------------------------------------------------
 list_initial = list()
 list_initial$regression <- list("resp1" = c(1,0))
 list_initial$power <- list("resp1" = c(3))
 list_initial$tau <- list("resp1" = c(0.1))
 list_initial$rho = 0
 
-# Power parameter fixed --------------------------------------------------------------
-fit8 <- mcglm(linear_pred = c(y1 ~ covariate), matrix_pred = list("resp1" = list(Z0)),
-              link = "log", variance = "tweedie", data = data, control_initial = list_initial)
+# Power parameter fixed ------------------------------------------------
+fit8 <- mcglm(linear_pred = c(y1 ~ covariate), 
+              matrix_pred = list("resp1" = list(Z0)),
+              link = "log", variance = "tweedie", data = data, 
+              control_initial = list_initial)
 summary(fit8)
 
-# Estimating the power parameter -----------------------------------------------------
-fit8.power <- mcglm(linear_pred = c(y1 ~ covariate), matrix_pred = list("resp1" = list(Z0)),
-                    link = "log", variance = "tweedie", power_fixed = list(FALSE), data = data,
+# Estimating the power parameter ---------------------------------------
+fit8.power <- mcglm(linear_pred = c(y1 ~ covariate), 
+                    matrix_pred = list("resp1" = list(Z0)),
+                    link = "log", variance = "tweedie", 
+                    power_fixed = list(FALSE), data = data,
                     control_initial = list_initial)
 summary(fit8.power)
 plot(fit8.power, type = "algorithm")
 
 
-# Case 9 - Poisson-Tweedie regression model -------------------------------------------
+# Case 9 - Poisson-Tweedie regression model ----------------------------
 y1 <- rtweedie(200, mu = mu1$mu, power = 1.5, phi = 0.5)
 data <- data.frame("y1" = y1, "covariate" = covariate)
 
-fit9 <- mcglm(linear_pred = c(y1 ~ covariate), matrix_pred = list("resp1" = list(Z0)),
-              link = "log", variance = "tweedie", power_fixed = list(FALSE), data = data,
-              control_algorithm = list("method" = "chaser", "tunning" = 1))
+fit9 <- mcglm(linear_pred = c(y1 ~ covariate), 
+              matrix_pred = list("resp1" = list(Z0)),
+              link = "log", variance = "tweedie", 
+              power_fixed = list(FALSE), data = data,
+              control_algorithm = list("method" = "chaser", 
+                                       "tunning" = 1))
 summary(fit9)
 plot(fit9, type = "algorithm")
 
-# Case 10 - Poisson regression model --------------------------------------------------
+# Case 10 - Poisson regression model -----------------------------------
 y1 <- rtweedie(200, mu = mu1$mu, power = 1, phi = 1)
 data <- data.frame("y1" = y1, "covariate" = covariate)
 
-fit10 <- mcglm(linear_pred = c(y1 ~ covariate), matrix_pred = list("resp1" = list(Z0)),
-              link = "log", variance = "tweedie",  power_fixed = list(FALSE), data = data,
-              control_algorithm = list("method" = "rc", "tunning" = 0.1))
+fit10 <- mcglm(linear_pred = c(y1 ~ covariate), 
+               matrix_pred = list("resp1" = list(Z0)),
+               link = "log", variance = "tweedie",  
+               power_fixed = list(FALSE), data = data,
+               control_algorithm = list("method" = "rc", 
+                                        "tunning" = 0.1))
 summary(fit10)
 
-# Case 11 - Poisson-Tweedie regression model (Neymann-Type A) --------------------------
+# Case 11 - Poisson-Tweedie regression model (Neyman Type A) -----------
 # Neyman Type A
 y1 <- rtweedie(200, mu = mu1$mu, power = 1, phi = 1)
 y1 <- rpois(200, lambda = y1)
 data <- data.frame("y1" = y1, "covariate" = covariate)
 
-fit11 <- mcglm(linear_pred = c(y1 ~ covariate), matrix_pred = list("resp1" = list(Z0)),
-               link = "log", variance = "poisson_tweedie", power_fixed = list(TRUE), data = data)
+fit11 <- mcglm(linear_pred = c(y1 ~ covariate), 
+               matrix_pred = list("resp1" = list(Z0)),
+               link = "log", variance = "poisson_tweedie", 
+               power_fixed = list(TRUE), data = data)
 summary(fit11)
 
-# Case 12 - Poisson-Tweedie regression model (Negative Binomial) -----------------------
+# Case 12 - Poisson-Tweedie regression model (Negative Binomial) -------
 y1 <- rtweedie(200, mu = mu1$mu, power = 2, phi = 1.5)
 y1 <- rpois(200, lambda = y1)
 data <- data.frame("y1" = y1, "covariate" = covariate)
 
-# Initial values list ------------------------------------------------------------------
+# Initial values list --------------------------------------------------
 list_initial = list()
 list_initial$regression <- list("resp1" = c(1,0))
 list_initial$power <- list("resp1" = c(2))
 list_initial$tau <- list("resp1" = c(1))
 list_initial$rho = 0
 
-fit12 <- mcglm(linear_pred = c(y1 ~ covariate), matrix_pred = list("resp1" = list(Z0)),
-               link = "log", variance = "poisson_tweedie", power_fixed = list(FALSE), data = data,
+fit12 <- mcglm(linear_pred = c(y1 ~ covariate), 
+               matrix_pred = list("resp1" = list(Z0)),
+               link = "log", variance = "poisson_tweedie", 
+               power_fixed = list(FALSE), data = data,
                control_initial = list_initial,
-               control_algorithm = list("method" = "rc", "tunning" = 0.2))
+               control_algorithm = list("method" = "rc", 
+                                        "tunning" = 0.2))
 summary(fit12)
 
-# Case 13 - Poisson-Tweedie regression model (PIG - Poisson Inverse Gaussian)---------
-y1 <- rtweedie(200, mu = mu1$mu, power = 3, phi = 1.5)
+# Case 13 - Poisson-Tweedie regression model ---------------------------
+# (PIG - Poisson Inverse Gaussian) -------------------------------------
+y1 <- rtweedie(200, mu = mu1$mu, power = 3, phi = 0.1)
 y1 <- rpois(200, lambda = y1)
 data <- data.frame("y1" = y1, "covariate" = covariate)
 
-# Initial values list -----------------------------------------------------------------
+# Initial values list --------------------------------------------------
 list_initial = list()
 list_initial$regression <- list("resp1" = c(1,0.8))
 list_initial$power <- list("resp1" = c(3))
-list_initial$tau <- list("resp1" = c(0.5))
+list_initial$tau <- list("resp1" = c(0.1))
 list_initial$rho = 0
 
-fit13 <- mcglm(linear_pred = c(y1 ~ covariate), matrix_pred = list("resp1" = list(Z0)),
-               link = "log", variance = "poisson_tweedie", data = data, control_initial = list_initial,
-               control_algorithm = list("method" = "rc", "tunning" = 0.1))
+fit13 <- mcglm(linear_pred = c(y1 ~ covariate), 
+               matrix_pred = list("resp1" = list(Z0)),
+               link = "log", variance = "poisson_tweedie", data = data, 
+               control_initial = list_initial,
+               power_fixed = FALSE,
+               control_algorithm = list("method" = "rc", 
+                                        "tunning" = 1, 
+                                        "max_iter" = 100))
 summary(fit13)
 
-# Case 14 - Poisson-Tweedie regression model (Pólya-Aeppli) ---------------------------
+# Case 14 - Poisson-Tweedie regression model (Pólya-Aeppli) ------------
 y1 <- rtweedie(200, mu = mu1$mu, power = 1.5, phi = 1.5)
 y1 <- rpois(200, lambda = y1)
 data <- data.frame("y1" = y1, "covariate" = covariate)
 
-fit14 <- mcglm(linear_pred = c(y1 ~ covariate), matrix_pred = list("resp1" = list(Z0)),
-               link = "log", variance = "poisson_tweedie", power_fixed = FALSE, data = data)
+fit14 <- mcglm(linear_pred = c(y1 ~ covariate), 
+               matrix_pred = list("resp1" = list(Z0)),
+               link = "log", variance = "poisson_tweedie", 
+               power_fixed = FALSE, data = data)
 summary(fit14)
 
-# Methods ------------------------------------------------------------------------
+# Methods --------------------------------------------------------------
 # print
 fit14
 
diff --git a/Examples/GLMExamples.R b/Examples/GLMExamples.R
index 652f23a..aacd195 100755
--- a/Examples/GLMExamples.R
+++ b/Examples/GLMExamples.R
@@ -1,35 +1,40 @@
-# Set of examples 2 - GLM examples ----------------------------------------------
-# Author: Wagner Hugo Bonat LEG/IMADA -------------------------------------------
-# Date: 08/08/2015 --------------------------------------------------------------
-#--------------------------------------------------------------------------------
+# Set of examples 2 - GLM examples -------------------------------------
+# Author: Wagner Hugo Bonat LEG/IMADA ----------------------------------
+# Date: 08/08/2015 -----------------------------------------------------
+#-----------------------------------------------------------------------
 rm(list=ls())
 
 # Loading extra packages
 require(mcglm)
-
-# Case 1 ------------------------------------------------------------------------
+require(Matrix)
+# Case 1 ---------------------------------------------------------------
 ## Dobson (1990) Page 93: Randomized Controlled Trial :
 counts <- c(18,17,15,20,10,20,25,13,12)
 outcome <- gl(3,1,9)
 treatment <- gl(3,3)
 print(d.AD <- data.frame(treatment, outcome, counts))
 
-# Orthodox Poisson model
-fit.glm <- glm(counts ~ outcome + treatment, family = quasipoisson())
+# Orthodox Poisson model -----------------------------------------------
+fit.glm <- glm(counts ~ outcome + treatment, family = poisson())
 summary(fit.glm)
 
-# Quasi-Poisson model via mcglm---------------------------------------------------
+# Quasi-Poisson model via mcglm ----------------------------------------
 Z0 <- Diagonal(dim(d.AD)[1],1)
-fit.qglm <- mcglm(linear_pred = c(counts ~ outcome + treatment), matrix_pred = list("resp1" = list(Z0)),
+fit.qglm <- mcglm(linear_pred = c(counts ~ outcome + treatment), 
+                  matrix_pred = list("resp1" = list(Z0)),
                   link = "log", variance = "tweedie", data = d.AD,
-                  control_algorithm = list("verbose" = FALSE, "method" = "chaser", "tunning" = 0.8))
+                  control_algorithm = list("verbose" = FALSE, 
+                                           "method" = "chaser", 
+                                           "tunning" = 0.8))
 summary(fit.qglm)
-cbind("mcglm" = round(coef(fit.qglm, type = "beta")$Estimates,5), "glm" = round(coef(fit.glm),5))
-cbind("mcglm" = sqrt(diag(vcov(fit.qglm))), "glm" = c(sqrt(diag(vcov(fit.glm))),NA))
+cbind("mcglm" = round(coef(fit.qglm, type = "beta")$Estimates,5), 
+      "glm" = round(coef(fit.glm),5))
+cbind("mcglm" = sqrt(diag(vcov(fit.qglm))), 
+      "glm" = c(sqrt(diag(vcov(fit.glm))),NA))
 plot(fit.qglm)
 plot(fit.qglm, type = "algorithm")
 
-# Poisson-Tweedie model via mcglm------------------------------------------------
+# Poisson-Tweedie model via mcglm --------------------------------------
 list_initial = list()
 list_initial$regression <- list("resp1" = coef(fit.glm) )
 list_initial$power <- list("resp1" = c(1))
@@ -37,57 +42,70 @@ list_initial$tau <- list("resp1" = c(0.01))
 list_initial$rho = 0
 Z0 <- Diagonal(dim(d.AD)[1],1)
 
-fit.pt <- mcglm(linear_pred = c(counts ~ outcome + treatment), matrix_pred = list("resp1" = list(Z0)),
-                  link = "log", variance = "poisson_tweedie",
-                  data = d.AD, control_initial = list_initial,
-                  control_algorithm = list("correct" = TRUE, tol = 1e-5,
-                                           max_iter = 100, method = "chaser", "tunning" = 1))
+fit.pt <- mcglm(linear_pred = c(counts ~ outcome + treatment), 
+                matrix_pred = list("resp1" = list(Z0)),
+                link = "log", variance = "poisson_tweedie",
+                power_fixed = TRUE,
+                data = d.AD, control_initial = list_initial,
+                control_algorithm = list("correct" = TRUE, 
+                                         "verbose" = TRUE,
+                                         "tol" = 1e-5,
+                                         "max_iter" = 100, 
+                                         "method" = "chaser", 
+                                         "tunning" = 1))
 summary(fit.pt)
-cbind("mcglm" = round(coef(fit.pt, type = "beta")$Estimates,5), "glm" = round(coef(fit.glm),5))
-cbind("mcglm" = sqrt(diag(vcov(fit.pt))), "glm" = c(sqrt(diag(vcov(fit.glm))),NA))
+cbind("mcglm" = round(coef(fit.pt, type = "beta")$Estimates,5), 
+      "glm" = round(coef(fit.glm),5))
+cbind("mcglm" = sqrt(diag(vcov(fit.pt))), 
+      "glm" = c(sqrt(diag(vcov(fit.glm))),NA))
 
-# This model is unsuitable for this data, note that the dispersion parameter is negative, indicating
-# underdispersion. Which agrees with my quasi Poisson model, but the glm function does not
-# agree with this result. I have to understand this difference.
+# This model is unsuitable for these data: note that the dispersion
+# parameter is negative, indicating underdispersion.
+# This agrees with the quasi-Poisson fit above, but the glm function
+# does not agree with this result. I have to understand this difference.
 
-# Case 2 ------------------------------------------------------------------------
-# an example with offsets from Venables & Ripley (2002, p.189)
+# Case 2 ---------------------------------------------------------------
+# An example with offsets from Venables & Ripley (2002, p.189)
 
 # Loading the data set
 utils::data(anorexia, package = "MASS")
 
-# Orthodox GLM fit --------------------------------------------------------------
+# Orthodox GLM fit -----------------------------------------------------
 anorex.1 <- glm(Postwt ~ Prewt + Treat + offset(Prewt),
                family = gaussian, data = anorexia)
 summary(anorex.1)
 
-# Fitting by mcglm --------------------------------------------------------------
+# Fitting by mcglm -----------------------------------------------------
 Z0 <- Diagonal(dim(anorexia)[1],1)
 
-fit.anorexia <- mcglm(linear_pred = c(Postwt ~ Prewt + Treat), matrix_pred = list("resp1" = list(Z0)),
-                link = "identity", variance = "constant", offset = list(anorexia$Prewt),
-                power_fixed = list(TRUE), data = anorexia,
-                control_algorithm = list("correct" = FALSE))
+fit.anorexia <- mcglm(linear_pred = c(Postwt ~ Prewt + Treat), 
+                      matrix_pred = list("resp1" = list(Z0)),
+                      link = "identity", variance = "constant", 
+                      offset = list(anorexia$Prewt),
+                      power_fixed = TRUE, data = anorexia,
+                      control_algorithm = list("correct" = FALSE))
 summary(fit.anorexia)
 
-# Comparing the results ---------------------------------------------------------
+# Comparing the results ------------------------------------------------
 cbind("mcglm" = round(coef(fit.anorexia, type = "beta")$Estimates,5),
       "glm" = round(coef(anorex.1),5))
 cbind("mcglm" = sqrt(diag(vcov(fit.anorexia))),
       "glm" = c(sqrt(diag(vcov(anorex.1))),NA))
 
 
-# Case 3 ------------------------------------------------------------------------
+# Case 3 ---------------------------------------------------------------
 # A Gamma example, from McCullagh & Nelder (1989, pp.300-2)
 clotting <- data.frame(
   u = c(5,10,15,20,30,40,60,80,100),
   lot1 = c(118,58,42,35,27,25,21,19,18),
   lot2 = c(69,35,26,21,18,16,13,12,12))
-fit.lot1 <- glm(lot1 ~ log(u), data = clotting, family = Gamma(link = "inverse"))
-fit.lot2 <- glm(lot2 ~ log(u), data = clotting, family = Gamma(link = "inverse"))
+fit.lot1 <- glm(lot1 ~ log(u), data = clotting, 
+                family = Gamma(link = "inverse"))
+fit.lot2 <- glm(lot2 ~ log(u), data = clotting, 
+                family = Gamma(link = "inverse"))
 summary(fit.lot1)
 
-# Initial values -----------------------------------------------------------------
+# Initial values -------------------------------------------------------
 list_initial = list()
 list_initial$regression <- list("resp1" = coef(fit.lot1))
 list_initial$power <- list("resp1" = c(2))
@@ -95,10 +113,11 @@ list_initial$tau <- list("resp1" = summary(fit.lot1)$dispersion)
 list_initial$rho = 0
 Z0 <- Diagonal(dim(clotting)[1],1)
 
-# Fitting ------------------------------------------------------------------------
-fit.lot1.mcglm <- mcglm(linear_pred = c(lot1 ~ log(u)), matrix_pred = list("resp1" = list(Z0)),
-                      link = "inverse", variance = "tweedie",
-                      data = clotting, control_initial = list_initial)
+# Fitting --------------------------------------------------------------
+fit.lot1.mcglm <- mcglm(linear_pred = c(lot1 ~ log(u)), 
+                        matrix_pred = list("resp1" = list(Z0)),
+                        link = "inverse", variance = "tweedie",
+                        data = clotting, control_initial = list_initial)
 summary(fit.lot1.mcglm)
 
 cbind("mcglm" = round(coef(fit.lot1.mcglm, type = "beta")$Estimates,5),
@@ -106,13 +125,15 @@ cbind("mcglm" = round(coef(fit.lot1.mcglm, type = "beta")$Estimates,5),
 cbind("mcglm" = sqrt(diag(vcov(fit.lot1.mcglm))),
       "glm" = c(sqrt(diag(vcov(fit.lot1))),NA))
 
-# Initial values -----------------------------------------------------------------
+# Initial values -------------------------------------------------------
 list_initial$regression <- list("resp1" = coef(fit.lot2))
 list_initial$tau <- list("resp1" = c(var(1/clotting$lot2)))
 
-# Fitting ------------------------------------------------------------------------
-fit.lot2.mcglm <- mcglm(linear_pred = c(lot2 ~ log(u)), matrix_pred = list("resp2" = list(Z0)),
-                        link = "inverse", variance = "tweedie", data = clotting,
+# Fitting --------------------------------------------------------------
+fit.lot2.mcglm <- mcglm(linear_pred = c(lot2 ~ log(u)), 
+                        matrix_pred = list("resp2" = list(Z0)),
+                        link = "inverse", variance = "tweedie", 
+                        data = clotting,
                         control_initial = list_initial)
 summary(fit.lot2.mcglm)
 
@@ -121,24 +142,29 @@ cbind("mcglm" = round(coef(fit.lot2.mcglm, type = "beta")$Estimates,5),
 cbind("mcglm" = sqrt(diag(vcov(fit.lot2.mcglm))),
       "glm" = c(sqrt(diag(vcov(fit.lot2))),NA))
 
-# Bivariate Gamma model-----------------------------------------------------------
+# Bivariate Gamma model ------------------------------------------------
 list_initial = list()
-list_initial$regression <- list("resp1" = coef(fit.lot1), "resp2" = coef(fit.lot2))
+list_initial$regression <- list("resp1" = coef(fit.lot1), 
+                                "resp2" = coef(fit.lot2))
 list_initial$power <- list("resp1" = c(2), "resp2" = c(2))
 list_initial$tau <- list("resp1" = c(0.00149), "resp2" = c(0.001276))
 list_initial$rho = 0.80
 Z0 <- Diagonal(dim(clotting)[1],1)
 
-fit.joint.mcglm <- mcglm(linear_pred = c(lot1 ~ log(u), lot2 ~ log(u)), matrix_pred = list(list(Z0), list(Z0)),
-                        link = c("inverse", "inverse"), variance = c("tweedie", "tweedie"),
-                        data = clotting, control_initial = list_initial,
-                        control_algorithm = list("correct" = TRUE, "method" = "rc", "tunning" = 0.001,
-                                                 max_iter = 100))
+fit.joint.mcglm <- mcglm(linear_pred = c(lot1 ~ log(u), lot2 ~ log(u)), 
+                         matrix_pred = list(list(Z0), list(Z0)),
+                         link = c("inverse", "inverse"), 
+                         variance = c("tweedie", "tweedie"),
+                         data = clotting, control_initial = list_initial,
+                         control_algorithm = list("correct" = TRUE, 
+                                                 "method" = "rc", 
+                                                 "tunning" = 0.001,
+                                                 "max_iter" = 100))
 summary(fit.joint.mcglm)
 plot(fit.joint.mcglm, type = "algorithm")
 plot(fit.joint.mcglm)
 
-# Bivariate Gamma model + log link function --------------------------------------
+# Bivariate Gamma model + log link function ----------------------------
 list_initial = list()
 list_initial$regression <- list("resp1" = c(log(mean(clotting$lot1)),0),
                                 "resp2" = c(log(mean(clotting$lot2)),0))
@@ -147,43 +173,52 @@ list_initial$tau <- list("resp1" = 0.023, "resp2" = 0.024)
 list_initial$rho = 0
 Z0 <- Diagonal(dim(clotting)[1],1)
 
-fit.joint.log <- mcglm(linear_pred = c(lot1 ~ log(u), "resp2" = lot2 ~ log(u)),
-                       matrix_pred = list(list(Z0),list(Z0)), link = c("log", "log"),
-                       variance = c("tweedie", "tweedie"), data = clotting,
+fit.joint.log <- mcglm(linear_pred = c("resp1" = lot1 ~ log(u), 
+                                       "resp2" = lot2 ~ log(u)),
+                       matrix_pred = list(list(Z0),list(Z0)), 
+                       link = c("log", "log"),
+                       variance = c("tweedie", "tweedie"), 
+                       data = clotting,
                        control_initial = list_initial)
 summary(fit.joint.log)
 plot(fit.joint.mcglm, type = "algorithm")
 plot(fit.joint.mcglm)
 
-# Case 4 - Binomial regression models ----------------------------------------
+# Case 4 - Binomial regression models ----------------------------------
 require(MASS)
 data(menarche)
 head(menarche)
-data <- data.frame("resp" = menarche$Menarche/menarche$Total, "Ntrial" = menarche$Total,
+data <- data.frame("resp" = menarche$Menarche/menarche$Total, 
+                   "Ntrial" = menarche$Total,
                    "Age" = menarche$Age)
 
-# Orthodox logistic regression model ------------------------------------------
-glm.out = glm(cbind(Menarche, Total-Menarche) ~ Age, family=binomial(logit), data=menarche)
+# Orthodox logistic regression model -----------------------------------
+glm.out = glm(cbind(Menarche, Total-Menarche) ~ Age, 
+              family=binomial(logit), data=menarche)
 
-# Fitting ---------------------------------------------------------------------
+# Fitting --------------------------------------------------------------
 Z0 <- Diagonal(dim(data)[1],1)
 
-fit.logit <- mcglm(linear_pred = c(resp ~ Age), matrix_pred = list("resp1" = list(Z0)),
-                   link = "logit", variance = "binomialP", Ntrial = list(data$Ntrial), data = data)
+fit.logit <- mcglm(linear_pred = c(resp ~ Age), 
+                   matrix_pred = list("resp1" = list(Z0)),
+                   link = "logit", variance = "binomialP", 
+                   Ntrial = list(data$Ntrial), data = data)
 
 summary(fit.logit)
 plot(fit.logit, type = "algorithm")
 plot(fit.logit)
 
-# Fitting with extra power parameter -------------------------------------------
-fit.logit.power <- mcglm(linear_pred = c(resp ~ Age), matrix_pred = list(list(Z0)),
-                   link = "logit", variance = "binomialP", Ntrial = list(data$Ntrial),
-                   power_fixed = FALSE, data = data)
+# Fitting with extra power parameter -----------------------------------
+fit.logit.power <- mcglm(linear_pred = c(resp ~ Age), 
+                         matrix_pred = list(list(Z0)),
+                         link = "logit", variance = "binomialP", 
+                         Ntrial = list(data$Ntrial),
+                         power_fixed = FALSE, data = data)
 summary(fit.logit.power)
 plot(fit.logit.power, type = "algorithm")
 plot(fit.logit.power)
 
-# All methods --------------------------------------------------------------------
+# All methods ----------------------------------------------------------
 # print method
 fit.logit.power
 # coef method
-- 
GitLab