## Mixed-effects models for chick weight over time (lme4).
## NOTE(review): weight is continuous, so family="poisson" is statistically
## questionable — presumably used here for demonstration; confirm intent.
## First fit: no overall intercept (0+), Time-by-Diet fixed effects, and a
## random intercept + Time slope for each Chick.
lmer.cw <- glmer(weight~0+(Time*Diet) + (1+Time|Chick),family="poisson",data=ChickWeight)
summary(lmer.cw)
## Refit with the default intercept included, for comparison.
lmer.cw <- glmer(weight~(Time*Diet) + (1+Time|Chick),family="poisson",data=ChickWeight)
summary(lmer.cw)
library(car)
## Fixed-effects-only GLMs with a quasipoisson family (log link) to absorb
## overdispersion in the counts-like weight outcome.
## model1b: interaction-only coding (Time:Diet) — a separate Time slope per
## Diet, but no Diet main effects.
model1b <- glm(weight~Time:Diet,family=quasipoisson(link="log"),data=ChickWeight)
model1b
## Type-II tests from car.
Anova(model1b)
summary(model1b)
## model2b: full factorial — main effects plus interaction.
model2b <- glm(weight~Time*Diet,family=quasipoisson(link="log"),data=ChickWeight)
summary(model2b)
Anova(model2b)
## Nested-model comparison. NOTE(review): for quasi- families the dispersion
## is estimated, so test="F" is usually recommended over "Chisq" — confirm.
anova(model1b,model2b,test="Chisq")
#Anova(model1b,model2b)
library(dplyr)
library(tidyr)
data(ChickWeight)
## Reshape to one row per chick, keeping weight at Time 6 ("early") and
## Time 21 ("end").
newChick <- pivot_wider(as.data.frame(ChickWeight),names_from=Time,values_from=weight) %>% dplyr::select("Chick","Diet",early="6",end="21")
## Binary outcome: a "fryer" is a chick whose final weight exceeds 250
## (0+ converts the logical to 0/1).
newChick$fryer <- 0+(newChick$end > 250)
## Drop chicks with no day-21 measurement.
newChick<- newChick[!is.na(newChick$end),]
## NOTE(review): after the row filter above, fryer can no longer be NA, so
## this line is effectively dead code — harmless, kept as written.
newChick$fryer[is.na(newChick$fryer)] <- 0
## Logistic regressions: both predictors, early weight only, and diet only.
gg1 <- glm(fryer~early+Diet,data=newChick,family=binomial)
gg2 <- glm(fryer~early,     data=newChick,family=binomial)
gg3 <- glm(fryer~Diet,      data=newChick,family=binomial)
library(lmtest)
## Compare likelihood-ratio (anova) vs Wald (waldtest) tests for dropping
## Diet (gg1 vs gg2) and for dropping early weight (gg1 vs gg3).
anova(gg1,gg2,test="Chisq")
waldtest(gg1,gg2,test="Chisq")
anova(gg1,gg3,test="Chisq")
waldtest(gg1,gg3,test="Chisq")
## NOTE(review): everything below duplicates the chunk above verbatim —
## likely a repeated R Markdown chunk; results are identical.
gg1 <- glm(fryer~early+Diet,data=newChick,family=binomial)
gg2 <- glm(fryer~early,     data=newChick,family=binomial)
gg3 <- glm(fryer~Diet,      data=newChick,family=binomial)
library(lmtest)
anova(gg1,gg2,test="Chisq")
waldtest(gg1,gg2,test="Chisq")
anova(gg1,gg3,test="Chisq")
waldtest(gg1,gg3,test="Chisq")
library(lme4)
library(car)
## Switch Diet to Helmert (orthogonal) contrasts so the fixed-effect
## coefficients are less correlated and the Anova tests are cleaner.
contrasts(ChickWeight$Diet) <- contr.helmert(4)
lmer.cw <- glmer(weight~(Time*Diet) + (1+Time|Chick),family="poisson",data=ChickWeight)
summary(lmer.cw)
## Random-effect variances/correlations.
VarCorr(lmer.cw)
Anova(lmer.cw)
library(GGally)
#confint(lmer.cw)  ##this takes a while
## Per-chick coefficients (fixed effects + that chick's random effects).
co <- coef(lmer.cw)$Chick
head(co)
round(cor(co),3)
colnames(co) <- c("intercept","time","diet2","diet3","diet4","timediet2","timediet3","timediet4")
## Scatterplot matrix of the per-chick intercepts vs Time slopes.
ggpairs(co[,1:2])
library(lme4)
library(car)
contrasts(ChickWeight$Diet) <- contr.helmert(4) #use orthogonal contrasts.
## Center Time so the intercept (and the intercept/slope correlation of the
## random effects) refers to the mean time rather than Time=0.
## NOTE(review): this mutates the global ChickWeight in place; later chunks
## that reuse it without data(ChickWeight) will see the centered values.
ChickWeight$Time <- ChickWeight$Time - mean(ChickWeight$Time)
lmer.cw <- glmer(weight~(Time*Diet) + (1+Time|Chick),family="poisson",data=ChickWeight)
summary(lmer.cw)
VarCorr(lmer.cw)
Anova(lmer.cw)
## Refit the same model twice from a fresh copy of the data — first WITHOUT
## centering Time (line commented out), then WITH centering — to compare how
## centering changes the estimated random-effect correlation.
library(lme4)
library(car)
data(ChickWeight)
contrasts(ChickWeight$Diet) <- contr.helmert(4) #use orthogonal contrasts.
#ChickWeight$Time <- ChickWeight$Time - mean(ChickWeight$Time)
lmer.cw <- glmer(weight~(Time*Diet) + (1+Time|Chick),family="poisson",data=ChickWeight)
summary(lmer.cw)
VarCorr(lmer.cw)
Anova(lmer.cw)
library(lme4)
library(car)
data(ChickWeight)
contrasts(ChickWeight$Diet) <- contr.helmert(4) #use orthogonal contrasts.
## Centered version.
ChickWeight$Time <- ChickWeight$Time - mean(ChickWeight$Time)
lmer.cw <- glmer(weight~(Time*Diet) + (1+Time|Chick),family="poisson",data=ChickWeight)
summary(lmer.cw)
VarCorr(lmer.cw)
Anova(lmer.cw)
library(tidyverse)
library(ggplot2)
library(readr)
## LeBron James shot-chart data; shot_made_numeric is the 0/1 outcome.
dat <- read_csv("lebron.csv")
## Derived time variables. NOTE(review): timeinperiod is in SECONDS but
## (period-1)*12 is in MINUTES, so timeingame mixes units — presumably it
## should be (period-1)*12*60 + ...; confirm before using timeingame.
dat |> mutate(timeinperiod = minutes_remaining*60+seconds_remaining,
timeingame=(period-1)*12+timeinperiod,
year = substr(game_date,1,4)) -> dat
## Empirical accuracy by distance, per team.
dat |> group_by(shot_distance,team_name) |> summarize(prob=mean(shot_made_numeric)) |>
ggplot(aes(x=shot_distance,y=prob,color=team_name,group=team_name)) +geom_point() + geom_smooth() + theme_bw() +
ggtitle("Lebron James Shot Accuracy by distance from basket")
## Logistic models of shot success: distance+team, then richer shot-type
## predictors, then a model that deliberately omits distance.
lj1 <- glm(shot_made_numeric~shot_distance+team_name,family=binomial(),data=dat)
summary(lj1)
Anova(lj1)
lj3 <- glm(shot_made_numeric~shot_distance+team_name +shot_zone_basic + shot_type+ action_type,family=binomial(),data=dat)
summary(lj3)
Anova(lj3)
lj4 <- glm(shot_made_numeric~team_name +shot_zone_basic + action_type,family=binomial(),data=dat)
## Overlay lj3's fitted probabilities on the empirical curve.
dat$fit <- lj3$fitted.values
dat |> group_by(shot_distance,team_name) |> summarize(prob=mean(shot_made_numeric),fit=mean(fit)) |>
ggplot(aes(x=shot_distance,y=prob,color=team_name,group=team_name)) +geom_point() +
geom_line(aes(y=fit))+theme_bw() +
ggtitle("Lebron James Shot Accuracy by distance from basket (Model does not use distance)")
table(dat$action_type,dat$shot_type)
## Bin distance. NOTE(review): floor(d/10)*5 labels each 10-ft bin by half
## its lower bound (0,5,10,...) — verify the intended bin labels.
dat$d2 <- floor(dat$shot_distance/10)*5
table(dat$action_type,dat$d2) -> xx
## Wald tests: does distance matter given the rest (lj3 vs lj4), and do the
## extra shot descriptors matter beyond distance+team (lj1 vs lj3)?
## (waldtest comes from lmtest, loaded earlier in this file.)
waldtest(lj3,lj4)
waldtest(lj1,lj3)
knitr::opts_chunk$set(echo = TRUE)
## Task ratings: three raters' ratings live in three column blocks
## (3:10, 11:18, 19:26); stack them so each rater becomes rows.
dat <- read.csv("TaskRatings.csv")
longmat <- rbind(as.matrix(dat[,3:10]),
as.matrix(dat[,11:18]),
as.matrix(dat[,19:26]),deparse.level=2)
## rater: 1/2/3 repeated in blocks matching the rbind order above.
longdat <- data.frame(task=dat$Task,
rater= as.factor(rep(1:3,each=nrow(dat))),
longmat)
library(car)
## Multivariate LM: all 8 rating dimensions as a joint response.
## (longdat[3:10] selects the same columns as longdat[,3:10] for a data.frame.)
model1 <- lm(as.matrix(longdat[3:10])~longdat$task+longdat$rater)
Anova(model1)
Anova(model1,test.statistic = "Roy")
library(car)
model1 <- lm(as.matrix(longdat[,3:10])~longdat$task+longdat$rater)
Anova(model1)
## model2: a 5-column subset of the responses, cell-means coding (0+).
model2 <- lm(as.matrix(longdat[,5:9])~0+longdat$task+longdat$rater)
summary(model2)
Manova(model2)
## model3: replace task with the single FM.1 rating as a covariate.
model3 <- lm(as.matrix(longdat[,5:9])~longdat$rater+longdat$FM.1)
#summary(model3)
Manova(model3)
anova(model2,model3)
summary(model3)
Manova(model3)
## Incremental comparisons: task after FM.1, and FM.1 after task.
model3 <- lm(as.matrix(longdat[,5:9])~longdat$rater+longdat$FM.1)
model3a <- lm(as.matrix(longdat[,5:9])~longdat$rater+longdat$task+ longdat$FM.1)
model3b <- lm(as.matrix(longdat[,5:9])~longdat$rater+longdat$task)
model3c <- lm(as.matrix(longdat[,5:9])~longdat$rater+longdat$FM.1)
anova(model3)
anova(model3a,model3c) ## effect of task after FM
anova(model3a,model3b) ## effect of FM after task
library(tidyverse)
library(nnet)
## Predict US region from state-level statistics (state.x77); strip spaces
## from column names (e.g. "Life Exp" -> "LifeExp") so formulas work.
statedat  <- tibble(as_tibble(state.x77),Region=state.region) |>
rename_with(~gsub(" ","",.)) |> mutate(Region=as.factor(Region))
## Full multinomial logistic model, then a manual backward/AIC search below:
## each refit drops or swaps one predictor and checks the confusion table.
m <- multinom(Region ~ Population+Income + Illiteracy + Frost + Area + LifeExp + Murder+HSGrad ,data=statedat)
m
table(statedat$Region,predict(m))
m2 <-  multinom(Region ~ Population+  Illiteracy + Frost + Area + LifeExp + Murder+HSGrad ,data=statedat)
m2
table(statedat$Region,predict(m2))
m2 <-  multinom(Region ~ Population+  Illiteracy + Frost +  LifeExp + Murder+HSGrad ,data=statedat)
m2
table(statedat$Region,predict(m2))
m2 <-  multinom(Region ~ Population+  Illiteracy +   LifeExp + Murder+HSGrad ,data=statedat)
m2
table(statedat$Region,predict(m2))
m2 <-  multinom(Region ~ Population+  Illiteracy +   Frost+ LifeExp + Murder+HSGrad ,data=statedat)
m2
table(statedat$Region,predict(m2))
m2 <-  multinom(Region ~ Population+  Illiteracy +   Frost+  Murder+HSGrad ,data=statedat)
m2
table(statedat$Region,predict(m2))
m2 <-  multinom(Region ~   Illiteracy +   Frost+ LifeExp + Murder+HSGrad ,data=statedat)
m2
table(statedat$Region,predict(m2))
m2 <-  multinom(Region ~   Illiteracy +   Frost+ LifeExp + Murder ,data=statedat)
m2
table(statedat$Region,predict(m2))
m2 <-  multinom(Region ~   Illiteracy +   Frost+ LifeExp  +HSGrad ,data=statedat)
m2
table(statedat$Region,predict(m2))
m2 <-  multinom(Region ~   Illiteracy +   Frost+  Murder +HSGrad ,data=statedat)
m2
table(statedat$Region,predict(m2))
##this is the best AIC I could find:
m2 <-  multinom(Region ~   Illiteracy +   Frost+ LifeExp + Murder +HSGrad ,data=statedat)
m2
table(statedat$Region,predict(m2))
anova(m2)
## Single-term deletion tests: drop each predictor from the chosen model in
## turn and compare with a likelihood-ratio anova.
m3 <-  multinom(Region ~   Illiteracy +   Frost+ LifeExp + Murder ,data=statedat)
anova(m2,m3)
m.LifeExp <-  multinom(Region ~   Illiteracy +   Frost+ Murder+HSGrad ,data=statedat)
anova(m2,m.LifeExp)
m.HSGrad <-  multinom(Region ~   Illiteracy +   Frost+ LifeExp + Murder ,data=statedat)
anova(m2,m.HSGrad)
m.Murder <-  multinom(Region ~   Illiteracy +   Frost+ LifeExp + HSGrad ,data=statedat)
anova(m2,m.Murder)
m.Frost <-  multinom(Region ~   Illiteracy +    LifeExp + Murder+HSGrad ,data=statedat)
anova(m2,m.Frost)
m.LifeExp <-  multinom(Region ~   Illiteracy +   Frost+ Murder+HSGrad ,data=statedat)
anova(m2,m.LifeExp)
m.Illiteracy <-  multinom(Region ~   Frost+ LifeExp + Murder+HSGrad ,data=statedat)
anova(m2,m.Illiteracy)
## NOTE(review): this entire section repeats the TaskRatings MANOVA and the
## multinomial region analysis from earlier in the file (likely the final
## knitted version of the same chunks); see the first occurrences for the
## detailed commentary.
knitr::opts_chunk$set(echo = TRUE)
dat <- read.csv("TaskRatings.csv")
longmat <- rbind(as.matrix(dat[,3:10]),
as.matrix(dat[,11:18]),
as.matrix(dat[,19:26]),deparse.level=2)
longdat <- data.frame(task=dat$Task,
rater= as.factor(rep(1:3,each=nrow(dat))),
longmat)
library(car)
model1 <- lm(as.matrix(longdat[3:10])~longdat$task+longdat$rater)
Anova(model1)
Anova(model1,test.statistic = "Roy")
library(car)
model1 <- lm(as.matrix(longdat[,3:10])~longdat$task+longdat$rater)
Anova(model1)
model2 <- lm(as.matrix(longdat[,5:9])~0+longdat$task+longdat$rater)
summary(model2)
Manova(model2)
model3 <- lm(as.matrix(longdat[,5:9])~longdat$rater+longdat$FM.1)
#summary(model3)
Manova(model3)  ##FM.1 does matter
##dig deeper with these:
#anova(model2,model3)
#summary(model3)
model3 <- lm(as.matrix(longdat[,5:9])~longdat$rater+longdat$FM.1)
model3a <- lm(as.matrix(longdat[,5:9])~longdat$rater+longdat$task+ longdat$FM.1)
model3b <- lm(as.matrix(longdat[,5:9])~longdat$rater+longdat$task)
model3c <- lm(as.matrix(longdat[,5:9])~longdat$rater+longdat$FM.1)
anova(model3)
anova(model3a,model3c) ## effect of task after FM
anova(model3a,model3b) ## effect of FM after task
library(tidyverse)
library(nnet)
statedat  <- tibble(as_tibble(state.x77),Region=state.region) |>
rename_with(~gsub(" ","",.)) |> mutate(Region=as.factor(Region))
m <- multinom(Region ~ Population+Income + Illiteracy + Frost + Area + LifeExp + Murder+HSGrad ,data=statedat)
m
table(statedat$Region,predict(m))
##this is the best AIC I could find:
m2 <-  multinom(Region ~   Illiteracy +   Frost+ LifeExp + Murder +HSGrad ,data=statedat)
m2
table(statedat$Region,predict(m2)) ##no errors!
m.HSGrad <-  multinom(Region ~   Illiteracy +   Frost+ LifeExp + Murder ,data=statedat)
anova(m2,m.HSGrad)
m.Murder <-  multinom(Region ~   Illiteracy +   Frost+ LifeExp + HSGrad ,data=statedat)
anova(m2,m.Murder)
m.Frost <-  multinom(Region ~   Illiteracy +    LifeExp + Murder+HSGrad ,data=statedat)
anova(m2,m.Frost)
m.LifeExp <-  multinom(Region ~   Illiteracy +   Frost+ Murder+HSGrad ,data=statedat)
anova(m2,m.LifeExp)
m.Illiteracy <-  multinom(Region ~   Frost+ LifeExp + Murder+HSGrad ,data=statedat)
anova(m2,m.Illiteracy)
## NOTE(review): hard-coded absolute setwd() makes the script non-portable;
## it only works on the author's machine.
setwd("~/Dropbox/courses/5220-s2025b/web-5220/psy5220/daily/Day09_MANOVA")
library(ggplot2)
library(tidyverse)
## NOTE(review): simdat is not defined until later in this file, so this line
## fails if the file is run top to bottom — presumably a stray/duplicated
## chunk line; the same statement reappears after simdat is created.
contrasts(simdat$question) <- contr.sum(levels(simdat$question))
library(knitr)
library(rmdformats)

## Global options
## BUG FIX: max.print must be a number; the original (rmdformats template)
## passed the string "75", and numeric comparisons against a character
## option silently misbehave.
options(max.print=75)
## Default chunk options for the knitted document.
opts_chunk$set(echo=TRUE,
cache=TRUE,
prompt=FALSE,
tidy=TRUE,
comment=NA,
message=FALSE,
warning=FALSE)
opts_knit$set(width=75)
## Log-odds (logit) transform: maps a probability p in (0,1) to the real line.
## BUG FIX: the original body was log(p/1-p), which parses as log(p - p) =
## log(0) = -Inf for every p; the intended expression is log(p/(1-p)).
## (Not used downstream in this file, so the fix changes no later results.)
logodds <- function(p) {log(p/(1-p))}
## Inverse of the log-odds (the logistic function): maps real-valued
## log-odds back to a probability in (0,1). NOTE(review): the name "logit"
## conventionally refers to the forward transform, but it is kept because
## later chunks call it by this name.
logit <- function(lo) {1/(1+exp(-lo))}
## Simulate a Rasch-style test: 50 subjects x 20 questions; a response is
## correct with probability logistic(skill - difficulty).
set.seed(1009)
numsubs <- 50
numqs <- 20
skilllevel <- rnorm(numsubs)
questiondiff <- rnorm(numqs)
## combined[i,j] = skill of subject i minus difficulty of question j.
combined <- outer(skilllevel,questiondiff,function(x,y){x-y})
pcorrect <- logit(combined)
pcorrect.2 <- logit(combined+2) ## An easier test with the same subjects and problems.
## Bernoulli draws; the runif vector is recycled column-wise to match the
## matrix, giving a logical subjects-by-questions response matrix.
sim1 <- pcorrect > runif(numsubs*numqs)
sim2 <- pcorrect.2 > runif(numsubs*numqs)
## Long format: as.vector() unrolls column-major, matching sub varying
## fastest and question in blocks; corr is 0/1.
simdat <- data.frame(sub=factor(rep(1:numsubs,numqs)),
question = factor(rep(1:numqs,each=numsubs)),
corr =as.vector(sim1)+0)
simdat.2 <- data.frame(sub=factor(rep(1:numsubs,numqs)),
question = factor(rep(1:numqs,each=numsubs)),
corr =as.vector(sim2)+0)
## Rasch-as-logistic-regression: one coefficient per subject and question
## (no intercept via 0+).
model <- glm(corr~0+sub+question,family=binomial(),data=simdat)
summary(model)
anova(model,test="Chisq")
model2 <- glm(corr~0+sub+question,family=binomial(),data=simdat.2)
summary(model2)
library(ggplot2)
library(tidyverse)
## Sum-to-zero contrasts for question so item effects are centered; refit
## both models under this coding.
contrasts(simdat$question) <- contr.sum(levels(simdat$question))
contrasts(simdat.2$question) <- contr.sum(levels(simdat.2$question))
model <- glm(corr~0+sub+question,family=binomial(),data=simdat)
model2 <- glm(corr~0+sub+question,family=binomial(),data=simdat.2)
## Coefficients 1:50 are the per-subject abilities; 51:69 are the 19
## question contrasts (the 20th is implied by the sum-to-zero constraint).
items <- data.frame(names=names(model$coefficients[51:69]),
model1=model$coefficients[51:69],
model2 = model2$coefficients[51:69])
knitr::kable(items)
## Item parameters should agree across the two tests (same questions).
items %>%  ggplot(aes(x=model1,y=model2)) + geom_point() + geom_abline(intercept=0,slope=1) + theme_bw() + ggtitle("Item parameters")
## Person coefficients
person <-  data.frame(names=names(model$coefficients[1:50]),
model1=model$coefficients[1:50],
model2 = model2$coefficients[1:50])
knitr::kable(person)
person %>%  ggplot(aes(x=model1,y=model2)) + geom_point() + geom_abline(intercept=0,slope=1) + theme_bw() + ggtitle("Person parameters")
library(ggplot2)
## Recovery plots: estimated coefficients vs the true simulated values.
## NOTE(review): quickplot()/qplot() are deprecated in ggplot2 >= 3.4; these
## calls still run but warn — consider ggplot(aes(...)) if updating.
quickplot(x=model$coef[1:numsubs],y=skilllevel)+
geom_point(size=4)+
xlab("Estimated Model coefficient") +
ylab("Person Ability") + geom_label(label=1:numsubs) + theme_bw()
cor(model$coef[1:numsubs],skilllevel)
## Append the implied 20th item parameter to the 19 estimated contrasts.
## NOTE(review): under sum-to-zero contrasts the omitted level equals the
## NEGATIVE SUM of the others; -mean() divides by 19 — verify this is the
## intended reconstruction.
itempars <- c((model$coef[-(1:numsubs)]), -mean(model$coef[-(1:numsubs)]))
itempars2 <- c((model2$coef[-(1:numsubs)]), -mean(model2$coef[-(1:numsubs)]))
qplot(x=itempars,y=questiondiff)+
geom_label(label=1:length(questiondiff))+
xlab("Estimated Model coefficient") + ylab("Question difficulty") + theme_bw()
cor(itempars,questiondiff)
library(gridExtra)
## Raw accuracies: per-person (rows) and per-question (columns).
rowMeans(sim1)
colMeans(sim1)
## Parameters vs raw accuracy, side by side for each simulated test.
grid.arrange(
qplot(rowMeans(sim1),model$coef[1:numsubs],xlab="Person accuracy",ylab="Person ability parameter") + theme_bw(),
qplot(colMeans(sim1),itempars,xlab="Question accuracy",ylab="Item difficulty parameter")+ theme_bw(),ncol=2)
grid.arrange(
qplot(rowMeans(sim2),model2$coef[1:numsubs],xlab="Person accuracy",ylab="Person ability parameter")+theme_bw(),
qplot(colMeans(sim2),itempars2,xlab="Question accuracy",ylab="Item difficulty parameter")+theme_bw(),ncol=2)
library(ltm)
## Fit proper Rasch models (ltm package) to the simulated 0/1 matrices
## (+0 converts logical to numeric).
p1 <- sim1+0
p2 <- sim2+0
irt1 <- rasch(p1)
irt2 <- rasch(p2)
summary(irt1)
summary(irt2)
##this is an alternative to alpha in psych package
descript(p1)
## Compare logistic-regression item estimates to the Rasch difficulty
## coefficients (column 1 of coef).
plot(itempars,irt1$coef[,1],main=
"Comparison of model Item coefficients",xlab="Logistic coefficients",ylab="IRT coefficients")
abline(0,1)
plot(itempars2,irt2$coef[,1],main=
"Comparison of model Item coefficients",xlab="Logistic coefficients",ylab="IRT coefficients")
abline(0,1)
plot(irt1)
plot(irt2)
## Corrupt the test: replace the first 5 questions with pure coin flips and
## refit, to see how bad items show up in the estimates and fit statistics.
set.seed(10010)
irt2 <- rasch(sim2+0)
sim3 <- sim2
sim3[,1:5] <- (runif(5*numsubs)<.5 )+0
irt3 <- rasch(sim3)
summary(irt2)
summary(irt3)
plot((cbind(irt1$coef[,1],irt3$coef[,1])),main="Item coefficients with bad questions",xlab="test 2",ylab="test 3")
plot((cbind(irt1$coef[,1],irt3$coef[,1])),main="Item coefficients with bad questions (zoomed)",xlab="test 2",ylab="test 3",ylim=c(-5,5))
## Item- and person-level fit diagnostics for the clean vs corrupted tests.
item.fit(irt2)
item.fit(irt3)
person.fit(irt2)
person.fit(irt3)
person.fit(irt2)
print(person.fit(irt2))
## Real test data: one 0/1 column per question.
dat <- read.csv("testscores.csv")
##descript(dat) ##doesn't work. Thus compute Cronbach's alpha on the data
descript(dat,chi.squared=F)
#force the discrimination parameter to be 1
## constraint = cbind(parameter index, fixed value): parameter number
## ncol(dat)+1 is the common discrimination; fix it at 1 (classic Rasch).
model1 <- rasch(dat,constraint=cbind(length(dat) + 1, 1))
model1
#summary(model1)
#allow discrimination parameter to be estimated
model2 <- rasch(dat)
model2
#summary(model2)
## Item characteristic curves, constrained vs estimated discrimination.
par(mfrow=c(1,2))
plot(model1)
plot(model2)
anova(model1,model2)
## 2PL: each item gets its own discrimination (ltm with one latent trait).
model3 <- ltm(dat~z1)
model3
plot(model3)
#summary(model3)
item.fit(model3)
psych::alpha(dat)
person.fit(model3)
## Pairwise margin (residual association) diagnostics.
margins(model3)
table(dat[,13],dat[,37])
par(mfrow=c(2,2))
## Distribution of each person's overall score (row mean across items),
## grouped by whether they answered the given item correctly; the counts in
## the labels are the group sizes.
## BUG FIX: the original called boxplot(dat$q5, rowMeans(dat), ...), which
## draws the raw 0/1 item column and the row means as two unrelated boxes —
## that does not match the "Incorrect"/"correct" labels. The formula form
## groups the overall scores by item correctness as the labels intend.
boxplot(rowMeans(dat)~dat$q5,main="Correct on q5",names=c("Incorrect (3)","correct (18)"))
boxplot(rowMeans(dat)~dat$q4,main="Correct on q4",names=c("Incorrect (1)","correct (20)"))
boxplot(rowMeans(dat)~dat$q10,main="Correct on q10",names=c("Incorrect (3)","correct (18)"))
boxplot(rowMeans(dat)~dat$q11,main="Correct on q11",names=c("Incorrect (13)","correct (8)"))
## 3-parameter logistic (adds a guessing parameter, capped at 0.5) on the
## first 15 items only.
model9 <- tpm(dat[,1:15],type="latent.trait",max.guessing =.5)
model9
plot(model9)
big5 <- read.csv("bigfive.csv")
## Which Big-Five scale each of the 44 items belongs to
## (E/A/C/N/O = Extraversion/Agreeableness/Conscientiousness/
## Neuroticism/Openness).
qtype <- c("E","A","C","N","O", "E","A","C","N","O",
"E","A","C","N","O", "E","A","C","N","O",
"E","A","C","N","O", "E","A","C","N","O",
"E","A","C","N","O", "E","A","C","N","O",
"O","A","C","O")
## Keying direction per item: -1 marks reverse-worded items.
valence <- c(1,-1,1,1,1,  -1,1,-1,-1,1,
1,-1,1,1,1,  1,1,-1,1,1,
-1,1,-1,-1,1,  1,-1,1,1,1,
-1,1,1,-1,-1,  1,-1,1,1,1,
-1,1,-1,1)
##reverse code
## Reverse-code the negatively keyed items on the 1..5 response scale
## (column 1 of big5 is an identifier, so item i lives in column i+1).
## Vectorized replacement of the original per-column loop; same behavior.
reversed <- which(valence == -1) + 1
big5[reversed] <- 6 - big5[reversed]
## Extraversion items only: keep column 1 (identifier) plus the E-keyed
## columns, then drop rows with any missing response.
ei <- big5[,c(T,qtype=="E")]
ei <- ei[!is.na(rowSums(ei)),]
## Graded response model for the ordinal items; constrained = equal
## discrimination across items.
g1 <-  grm(ei[,-1], constrained = TRUE)
g1
summary(g1)
## Category response curves for each of the 8 E items.
par(mfrow=c(4,2))
plot(g1,items=1)
plot(g1,items=2)
plot(g1,items=3)
plot(g1,items=4)
plot(g1,items=5)
plot(g1,items=6)
plot(g1,items=7)
plot(g1,items=8)
margins(g1)
## Unconstrained version: per-item discrimination.
g2 <-  grm(ei[,-1], constrained = FALSE)
g2
summary(g2)
par(mfrow=c(4,2))
plot(g2,items=1)
plot(g2,items=2)
plot(g2,items=3)
plot(g2,items=4)
plot(g2,items=5)
plot(g2,items=6)
plot(g2,items=7)
plot(g2,items=8)
## One- vs two-latent-trait models on the first 15 test items.
## NOTE(review): dat here is whatever was last assigned — the testscores
## data if chunks ran in file order.
model4a <- ltm(dat[,1:15]~z1)
plot(model4a)
model4a
#summary(model4)
item.fit(model4a)
model4b <- ltm(dat[,1:15]~z1+z2)
model4b
## LR test: does the second trait improve fit?
anova(model4a,model4b)
#item.fit(model4b)
## Factor loadings per item; compare the two dimensions.
fs <- factor.scores(model4b)
barplot(t(fs$coef),beside=T)
plot(fs$coef[,2],fs$coef[,3])
## NOTE(review): this section repeats the corrupted-test simulation and the
## testscores Rasch fits from earlier chunks verbatim.
set.seed(10010)
irt2 <- rasch(sim2+0)
sim3 <- sim2
sim3[,1:5] <- (runif(5*numsubs)<.5 )+0
irt3 <- rasch(sim3)
summary(irt2)
summary(irt3)
plot((cbind(irt1$coef[,1],irt3$coef[,1])),main="Item coefficients with bad questions",xlab="test 2",ylab="test 3")
plot((cbind(irt1$coef[,1],irt3$coef[,1])),main="Item coefficients with bad questions (zoomed)",xlab="test 2",ylab="test 3",ylim=c(-5,5))
## NOTE(review): this fit runs BEFORE dat is (re)read on the next line, so
## it uses whatever dat currently holds — likely a chunk-ordering artifact.
model2 <- rasch(dat)
dat <- read.csv("testscores.csv")
##descript(dat) ##doesn't work. Thus compute Cronbach's alpha on the data
descript(dat,chi.squared=F)
#force the discrimination parameter to be 1
model1 <- rasch(dat,constraint=cbind(length(dat) + 1, 1))
model1
#summary(model1)
#allow discrimination parameter to be estimated
model2 <- rasch(dat)
model2
#summary(model2)
par(mfrow=c(1,2))
plot(model1)
plot(model2)
## Interactive inspection: print the rasch function source and open the
## optim help page; then raise the print limit for long outputs.
rasch
?optim
options(max.print=200)
options(max.print=200)
knitr::opts_chunk$set(echo = TRUE)
data <- read.csv("http://pages.mtu.edu/~shanem/psy5220/daily/Day10-11/crossword.csv")
qs <- read.csv("http://pages.mtu.edu/~shanem/psy5220/daily/Day10-11/answers.csv")
library(ltm)
responses <- data[,-(1:5)]
d <- descript(responses,chi.squared=F,n.print=100)
d
plot(d)
m1 <- rasch(responses)
m1
BIC(m1)
plot(m1)
plot(m1,type="IIC")
plot(m1,type="IIC",items=0)
margins(m1)
plot(d,items=c(13,44))
m2 <- ltm(responses~z1)
coef(m2)
BIC(m2)  #The smaller the better
par(mfrow=c(1,2))
plot(coef(m1)[,1],coef(m2)[,1],main="Difficulty")
plot(coef(m1)[,1],coef(m2)[,2],main="Discriminability")
anova(m1,m2)
plot(m2)
plot(m2,type="IIC")
plot(m2,type="IIC",items=0)
m3 <- ltm(responses~z1+z2)
coef(m3)
BIC(m3)
##compare intercept to model1 difficulty:
par(mfrow=c(1,3))
plot(coef(m1)[,1],coef(m3)[,1])
plot(coef(m1)[,1],coef(m3)[,2])
plot(coef(m1)[,1],coef(m3)[,3])
plot(coef(m2)[,2],coef(m3)[,2])
plot(coef(m2)[,2],coef(m3)[,3])
coef(m1)
coef(m2)
coef(m3)
str(m3)
m3$IRT.param
str(m1)
m1$GH
dim(data)
str(m1)
m1$patterns
