mirt example for a mixture Rasch model with two latent classes

Adapted from the sirt package examples (data.read).

# Attach mirt and load the example data set.
library(mirt)

# data.read from the sirt package: dichotomous reading items.
data("data.read", package = "sirt")
dat <- data.read

# Comparison fits with two other mixture-Rasch packages.  Both calls failed
# in this run because the packages were not installed -- the recorded error
# output below documents that; the mirt-based fit further down is the one
# that actually runs.

# raschmix (in psychomix)
mod8a <- psychomix::raschmix(data= as.matrix(dat) , k = 2, scores = "saturated")
## Error in loadNamespace(x): there is no package called 'psychomix'
summary(mod8a)
## Error in h(simpleError(msg, call)): error in evaluating the argument 'object' in selecting a method for function 'summary': object 'mod8a' not found
# mrm (in mRm)
mod8b <- mRm::mrm(data.matrix=dat, cl=2)
## Error in loadNamespace(x): there is no package called 'mRm'
mod8b$conv.to.bound
## Error in eval(expr, envir, enclos): object 'mod8b' not found
plot(mod8b)
## Error in h(simpleError(msg, call)): error in evaluating the argument 'x' in selecting a method for function 'plot': object 'mod8b' not found
print(mod8b)
## Error in print(mod8b): object 'mod8b' not found
# mirt
#* define theta grid
# Two latent classes are emulated by stacking two copies of a 9-point
# quadrature grid: columns 1-2 belong to class 1, columns 3-4 to class 2.
# The second column of each pair is a constant 1 that carries the
# class-specific item intercepts (a2/a4 below).
theta.k <- seq(-5, 5, length.out = 9)  # spell out length.out; no partial matching
TP <- length(theta.k)
Theta <- matrix(0, nrow = 2 * TP, ncol = 4)
Theta[seq_len(TP), 1:2] <- cbind(theta.k, 1)       # class 1 rows
Theta[seq_len(TP) + TP, 3:4] <- cbind(theta.k, 1)  # class 2 rows
Theta
##        [,1] [,2]  [,3] [,4]
##  [1,] -5.00    1  0.00    0
##  [2,] -3.75    1  0.00    0
##  [3,] -2.50    1  0.00    0
##  [4,] -1.25    1  0.00    0
##  [5,]  0.00    1  0.00    0
##  [6,]  1.25    1  0.00    0
##  [7,]  2.50    1  0.00    0
##  [8,]  3.75    1  0.00    0
##  [9,]  5.00    1  0.00    0
## [10,]  0.00    0 -5.00    1
## [11,]  0.00    0 -3.75    1
## [12,]  0.00    0 -2.50    1
## [13,]  0.00    0 -1.25    1
## [14,]  0.00    0  0.00    1
## [15,]  0.00    0  1.25    1
## [16,]  0.00    0  2.50    1
## [17,]  0.00    0  3.75    1
## [18,]  0.00    0  5.00    1
# define model
I <- ncol(dat) # I = 12
# Four "factors": (F1a, F1b) are the slope/intercept design for class 1 and
# (F2a, F2b) for class 2, matching the four columns of the custom Theta grid.
# CONSTRAIN forces the slopes a1 and a3 to be equal across all 12 items,
# i.e. a Rasch parameterization within each class.
mirtmodel <- mirt::mirt.model("
    F1a = 1-12 # slope
    F1b = 1-12 # difficulty
    F2a = 1-12 # slope
    F2b = 1-12 # difficulty
    CONSTRAIN = (1-12,a1),(1-12,a3)
")

# get initial parameter values
mod.pars <- mirt::mirt(dat, model=mirtmodel , pars = "values")

# set starting values for class specific item probabilities
# d is fixed at 0 because the item intercepts are carried instead by the
# class-specific parameters a2/a4 on the constant columns of Theta.
mod.pars[ mod.pars$name == "d" ,"value" ] <- 0
mod.pars[ mod.pars$name == "d" ,"est" ] <- FALSE
mod.pars[ mod.pars$name == "a1" ,"value" ] <- 1
mod.pars[ mod.pars$name == "a3" ,"value" ] <- 1

# initial values difficulties
# Class 1 starts at the logits of the item means; class 2 starts at a
# randomly perturbed copy so the two classes can separate.
# NOTE(review): no set.seed() precedes runif(), so these starting values
# (and the estimation trace below) are not reproducible across runs.
b1 <- qlogis( colMeans(dat) )
mod.pars[ mod.pars$name == "a2" ,"value" ] <- b1
mod.pars[ mod.pars$name == "a4" ,"value" ] <- b1 + runif(I , -1 , 1)

#* define prior for mixed Rasch analysis
# Custom prior for mirt's EM algorithm: a mixture of two normal densities
# over the stacked theta grid (rows 1:TP = class 1, rows TP+1 : 2*TP =
# class 2).  The signature (Theta, Etable) is fixed by mirt's
# customPriorFun interface.
#   Theta  - the custom 2*TP x 4 grid defined above
#   Etable - expected-count table from the previous E-step, or NULL on the
#            first call; two columns per dichotomous item
# Returns a vector of prior weights (length nrow(Theta)) summing to 1.
mixed_prior <- function(Theta, Etable){
    NC <- 2  # number of latent classes

    # number of theta points per class
    TP <- nrow(Theta) / NC
    prior1 <- dnorm( Theta[1:TP, 1] )
    prior1 <- prior1 / sum(prior1)
    if ( is.null(Etable) ){
        # first EM iteration: equal class proportions
        prior <- c( prior1, prior1 )
    } else {
        # number of items, recovered from the E-table (two response
        # categories per item) instead of reading a global variable I
        I <- ncol(Etable) / 2
        prior <- ( rowSums( Etable[ , seq(1, 2*I, 2) ] ) +
                       rowSums( Etable[ , seq(2, 2*I, 2) ] ) ) / I
        # class proportions: posterior mass aggregated within each block
        a1 <- aggregate( prior, list( rep(1:NC, each=TP) ), sum )
        a1[, 2] <- a1[, 2] / sum( a1[, 2] )

        # print some information during estimation
        cat( paste0( " Class proportions: " ,
                     paste0( round(a1[, 2] , 3 ) , collapse= " " ) ) , "\n")
        a1 <- rep( a1[, 2] , each=TP )

        # specify mixture of two normal distributions
        prior <- a1 * c(prior1, prior1)
    }
    prior / sum(prior)
}

#* estimate model
# The custom grid and the mixture prior are passed through 'technical';
# the class-proportion lines printed below come from mixed_prior() being
# called once per EM cycle.
mod8c <- mirt::mirt(dat, mirtmodel , pars=mod.pars , verbose=FALSE ,
technical = list( customTheta=Theta , customPriorFun = mixed_prior ) )
## EM quadrature for high dimensional models are better handled
##                                  	with the "QMCEM" or "MCEM" method
##  Class proportions: 0.564 0.436 
##  Class proportions: 0.565 0.435 
##  Class proportions: 0.563 0.437 
##  Class proportions: 0.552 0.448 
##  Class proportions: 0.536 0.464 
##  Class proportions: 0.517 0.483 
##  Class proportions: 0.495 0.505 
##  Class proportions: 0.477 0.523 
##  Class proportions: 0.461 0.539 
##  Class proportions: 0.431 0.569 
##  Class proportions: 0.414 0.586 
##  Class proportions: 0.399 0.601 
##  Class proportions: 0.361 0.639 
##  Class proportions: 0.345 0.655 
##  Class proportions: 0.331 0.669 
##  Class proportions: 0.305 0.695 
##  Class proportions: 0.292 0.708 
##  Class proportions: 0.281 0.719 
##  Class proportions: 0.25 0.75 
##  Class proportions: 0.24 0.76 
##  Class proportions: 0.233 0.767 
##  Class proportions: 0.226 0.774 
##  Class proportions: 0.219 0.781 
##  Class proportions: 0.213 0.787 
##  Class proportions: 0.188 0.812 
##  Class proportions: 0.179 0.821 
##  Class proportions: 0.172 0.828 
##  Class proportions: 0.164 0.836 
##  Class proportions: 0.158 0.842 
##  Class proportions: 0.152 0.848 
##  Class proportions: 0.135 0.865 
##  Class proportions: 0.13 0.87 
##  Class proportions: 0.127 0.873 
##  Class proportions: 0.117 0.883 
##  Class proportions: 0.114 0.886 
##  Class proportions: 0.112 0.888 
##  Class proportions: 0.106 0.894 
##  Class proportions: 0.103 0.897 
##  Class proportions: 0.101 0.899 
##  Class proportions: 0.096 0.904 
##  Class proportions: 0.094 0.906 
##  Class proportions: 0.093 0.907 
##  Class proportions: 0.089 0.911 
##  Class proportions: 0.088 0.912 
##  Class proportions: 0.088 0.912 
##  Class proportions: 0.087 0.913 
##  Class proportions: 0.087 0.913 
##  Class proportions: 0.087 0.913 
##  Class proportions: 0.087 0.913 
##  Class proportions: 0.086 0.914 
##  Class proportions: 0.086 0.914 
##  Class proportions: 0.085 0.915 
##  Class proportions: 0.085 0.915 
##  Class proportions: 0.084 0.916 
##  Class proportions: 0.084 0.916 
##  Class proportions: 0.084 0.916 
##  Class proportions: 0.084 0.916 
##  Class proportions: 0.084 0.916 
##  Class proportions: 0.084 0.916 
##  Class proportions: 0.083 0.917 
##  Class proportions: 0.083 0.917 
##  Class proportions: 0.083 0.917 
##  Class proportions: 0.082 0.918 
##  Class proportions: 0.081 0.919 
##  Class proportions: 0.08 0.92 
##  Class proportions: 0.079 0.921 
##  Class proportions: 0.076 0.924 
##  Class proportions: 0.074 0.926 
##  Class proportions: 0.073 0.927 
##  Class proportions: 0.072 0.928 
##  Class proportions: 0.071 0.929 
##  Class proportions: 0.071 0.929 
##  Class proportions: 0.07 0.93 
##  Class proportions: 0.07 0.93 
##  Class proportions: 0.07 0.93 
##  Class proportions: 0.07 0.93 
##  Class proportions: 0.07 0.93 
##  Class proportions: 0.07 0.93 
##  Class proportions: 0.069 0.931 
##  Class proportions: 0.069 0.931 
##  Class proportions: 0.069 0.931 
##  Class proportions: 0.069 0.931 
##  Class proportions: 0.069 0.931 
##  Class proportions: 0.069 0.931 
##  Class proportions: 0.069 0.931 
##  Class proportions: 0.069 0.931 
##  Class proportions: 0.069 0.931 
##  Class proportions: 0.069 0.931 
##  Class proportions: 0.069 0.931 
##  Class proportions: 0.069 0.931 
##  Class proportions: 0.069 0.931 
##  Class proportions: 0.069 0.931 
##  Class proportions: 0.069 0.931 
##  Class proportions: 0.069 0.931 
##  Class proportions: 0.069 0.931 
##  Class proportions: 0.069 0.931 
##  Class proportions: 0.069 0.931 
##  Class proportions: 0.069 0.931 
##  Class proportions: 0.069 0.931 
##  Class proportions: 0.069 0.931 
##  Class proportions: 0.069 0.931 
##  Class proportions: 0.069 0.931 
##  Class proportions: 0.069 0.931 
##  Class proportions: 0.069 0.931 
##  Class proportions: 0.069 0.931
# print a summary of the fitted mixture model
print(mod8c)
## 
## Call:
## mirt::mirt(data = dat, model = mirtmodel, pars = mod.pars, verbose = FALSE, 
##     technical = list(customTheta = Theta, customPriorFun = mixed_prior))
## 
## Full-information item factor analysis with 4 factor(s).
## Converged within 1e-04 tolerance after 105 EM iterations.
## mirt version: 1.39.4 
## M-step optimizer: BFGS 
## EM acceleration: Ramsay 
## Number of rectangular quadrature: 18
## Latent density type: Gaussian 
## 
## Log-likelihood = -1921.524
## Estimated parameters: 48 
## AIC = 3895.048
## BIC = 3993.666; SABIC = 3911.195
## G2 (4069) = 741.08, p = 1
## RMSEA = 0, CFI = NaN, TLI = NaN
# Like in Model 7e, the number of estimated parameters must be included.
# The +1 accounts for the one freely estimated class proportion.
n_free <- sum(mod.pars$est)
nest <- as.integer(n_free + 1)

# two class proportions and therefore one probability is freely estimated.
#* extract item parameters
# a1/a3 are the constrained-equal class slopes; a2/a4 are the
# class-specific item parameters on the constant grid columns (d was
# fixed at 0 in the starting values above).
coef(mod8c, simplify=TRUE)
## $items
##        a1      a2    a3     a4 d g u
## A1 11.519  16.200 0.982  2.046 0 0 1
## A2 11.519  14.268 0.982  1.291 0 0 1
## A3 11.519  14.305 0.982  0.311 0 0 1
## A4 11.519  13.243 0.982 -0.199 0 0 1
## B1 11.519  14.861 0.982  1.101 0 0 1
## B2 11.519  12.691 0.982  0.057 0 0 1
## B3 11.519  14.834 0.982  2.848 0 0 1
## B4 11.519  13.928 0.982  0.970 0 0 1
## C1 11.519  -6.951 0.982  4.433 0 0 1
## C2 11.519 -12.525 0.982  1.323 0 0 1
## C3 11.519  -7.048 0.982  2.779 0 0 1
## C4 11.519 -15.197 0.982  1.539 0 0 1
## 
## $means
## F1a F1b F2a F2b 
##   0   0   0   0 
## 
## $cov
##     F1a F1b F2a F2b
## F1a   1   0   0   0
## F1b   0   1   0   0
## F2a   0   0   1   0
## F2b   0   0   0   1
# record R and package versions for reproducibility
sessionInfo()
## R version 4.2.2 Patched (2022-11-10 r83330)
## Platform: x86_64-pc-linux-gnu (64-bit)
## Running under: Ubuntu 18.04.6 LTS
## 
## Matrix products: default
## BLAS:   /usr/lib/x86_64-linux-gnu/blas/libblas.so.3.7.1
## LAPACK: /usr/lib/x86_64-linux-gnu/lapack/liblapack.so.3.7.1
## 
## locale:
##  [1] LC_CTYPE=en_US.UTF-8       LC_NUMERIC=C              
##  [3] LC_TIME=en_US.UTF-8        LC_COLLATE=en_US.UTF-8    
##  [5] LC_MONETARY=en_US.UTF-8    LC_MESSAGES=en_US.UTF-8   
##  [7] LC_PAPER=en_US.UTF-8       LC_NAME=C                 
##  [9] LC_ADDRESS=C               LC_TELEPHONE=C            
## [11] LC_MEASUREMENT=en_US.UTF-8 LC_IDENTIFICATION=C       
## 
## attached base packages:
## [1] stats4    stats     graphics  grDevices utils     datasets  methods  
## [8] base     
## 
## other attached packages:
## [1] mvtnorm_1.2-2   sirt_3.12-66    ltm_1.2-0       polycor_0.8-1  
## [5] msm_1.7         MASS_7.3-58.2   mirt_1.39.4     lattice_0.20-45
## [9] knitr_1.43     
## 
## loaded via a namespace (and not attached):
##  [1] Rcpp_1.0.10          highr_0.10           compiler_4.2.2      
##  [4] dcurver_0.9.2        tools_4.2.2          evaluate_0.21       
##  [7] lifecycle_1.0.3      gtable_0.3.3         nlme_3.1-162        
## [10] mgcv_1.8-41          rlang_1.1.1          Matrix_1.5-3        
## [13] cli_3.6.1            rstudioapi_0.14      commonmark_1.9.0    
## [16] parallel_4.2.2       TAM_4.1-4            expm_0.999-7        
## [19] xfun_0.39            gridExtra_2.3        cluster_2.1.4       
## [22] admisc_0.32          grid_4.2.2           glue_1.6.2          
## [25] pbapply_1.7-0        survival_3.4-0       CDM_8.2-6           
## [28] GPArotation_2023.3-1 splines_4.2.2        mime_0.12           
## [31] permute_0.9-7        Deriv_4.1.3          markdown_1.7        
## [34] vegan_2.6-4