# Tail of a per-cluster keyword-summary loop whose header is above this
# excerpt: build a Keyword/Count data frame from the frequency table `tab`
# and render the top 10 rows as a markdown table.
tab2 <- data.frame(Keyword=names(tab),Count=tab)
knitr::kable((tab2[1:10,]))
}
# Per-cluster keyword summary for the PAM solution `p`.
# For each cluster: collect the keywords present in its documents (rows of
# the incidence matrix `mat` > 0, indexed into `wordlist`), then tabulate
# keywords occurring in 3+ documents, sorted most-frequent first.
cl <- p$clustering
#topwords <- matrix("",ncol=6,nrow=20)
for (cluster in levels(factor(cl)))
{
# Subset the raw data and the keyword-incidence rows for this cluster.
dat.tmp <- dat[cl==cluster,]
keymat.tmp <- mat[cl==cluster,] >0
keywords_in_cluster <- c()
# NOTE(review): growing a vector with c() in a loop is O(n^2), and
# 1:nrow() misfires if a cluster has zero rows -- left as-is here;
# a corrected revision appears later in this transcript.
for(doc in 1:nrow(keymat.tmp))
{
keywords_in_cluster <- c(keywords_in_cluster,wordlist[keymat.tmp[doc,]])
}
print(paste("Cluster",cluster))
tab <-table(keywords_in_cluster)
# Keep only keywords seen in more than two documents, descending.
tab <- tab[tab>2]
tab <- tab[order(-tab)]
tab2 <- data.frame(Keyword=names(tab),Count=tab)
# NOTE(review): inside a for loop kable() output is not auto-printed;
# it must be wrapped in print() to show (fixed in a later revision).
knitr::kable((tab2))
}
# Interactive inspection of the last cluster's frequency table.
knitr::kable(tab2,"pipe")
tab
# NOTE(review): `tab` is an atomic table, so $ extraction errors
# ("$ operator is invalid for atomic vectors").
tab$values
as.numeric(tab)
# Using Count=as.numeric(tab) keeps a single numeric column; passing the
# table object itself makes data.frame() expand it into two columns.
tab2 <- data.frame(Keyword=names(tab),Count=as.numeric(tab))
knitr::kable(tab2,"pipe")
# Revision of the keyword loop: Count is now numeric and the kable gets a
# per-cluster caption.  Output still not print()ed, so tables do not render
# inside the loop (fixed in the next revision).
cl <- p$clustering
#topwords <- matrix("",ncol=6,nrow=20)
for (cluster in levels(factor(cl)))
{
dat.tmp <- dat[cl==cluster,]
keymat.tmp <- mat[cl==cluster,] >0
keywords_in_cluster <- c()
for(doc in 1:nrow(keymat.tmp))
{
keywords_in_cluster <- c(keywords_in_cluster,wordlist[keymat.tmp[doc,]])
}
print(paste("Cluster",cluster))
tab <-table(keywords_in_cluster)
# Keywords occurring in 3+ documents, most frequent first.
tab <- tab[tab>2]
tab <- tab[order(-tab)]
tab2 <- data.frame(Keyword=names(tab),Count=as.numeric(tab))
knitr::kable(tab2,"pipe", caption=paste("Cluster",cluster))
}
# Same loop again; afterwards a single print(kable(...)) is tried outside
# the loop to confirm that print() is what makes the table render.
cl <- p$clustering
#topwords <- matrix("",ncol=6,nrow=20)
for (cluster in levels(factor(cl)))
{
dat.tmp <- dat[cl==cluster,]
keymat.tmp <- mat[cl==cluster,] >0
keywords_in_cluster <- c()
for(doc in 1:nrow(keymat.tmp))
{
keywords_in_cluster <- c(keywords_in_cluster,wordlist[keymat.tmp[doc,]])
}
print(paste("Cluster",cluster))
tab <-table(keywords_in_cluster)
tab <- tab[tab>2]
tab <- tab[order(-tab)]
tab2 <- data.frame(Keyword=names(tab),Count=as.numeric(tab))
knitr::kable(tab2,"pipe", caption=paste("Cluster",cluster))
}
# Check: explicitly print()ing the kable renders it (for the last cluster).
print(knitr::kable(tab2,"pipe", caption=paste("Cluster",cluster)))
# Working version of the per-cluster keyword summary: the kable is wrapped
# in print() inside the loop so each cluster's table actually renders.
cl <- p$clustering
#topwords <- matrix("",ncol=6,nrow=20)
for (cluster in levels(factor(cl)))
{
dat.tmp <- dat[cl==cluster,]
# Logical document x keyword incidence for this cluster.
keymat.tmp <- mat[cl==cluster,] >0
keywords_in_cluster <- c()
for(doc in 1:nrow(keymat.tmp))
{
keywords_in_cluster <- c(keywords_in_cluster,wordlist[keymat.tmp[doc,]])
}
print(paste("Cluster",cluster))
tab <-table(keywords_in_cluster)
# Keep keywords seen in 3+ documents, sorted descending by count.
tab <- tab[tab>2]
tab <- tab[order(-tab)]
tab2 <- data.frame(Keyword=names(tab),Count=as.numeric(tab))
print(knitr::kable(tab2,"pipe", caption=paste("Cluster",cluster)))
}
library(cluster)
# CLARA clustering (k = 5) on `dd`, then PAM (k-medoids, k = 4).
# FIX: the original bound the CLARA fit to `c`, shadowing base::c();
# use a descriptive name instead.
clara_fit <- clara(dd, k = 5)
clusplot(clara_fit)
# `p$clustering` is consumed by the keyword-summary loops that follow.
p <- pam(dd, k = 4)
plot(p)
cl <- p$clustering
#topwords <- matrix("",ncol=6,nrow=20)
# For each PAM cluster, tabulate the keywords appearing in its documents and
# print a markdown table of keywords occurring in 3+ documents.
for (cluster in levels(factor(cl)))
{
  # drop = FALSE keeps single-row clusters as matrices/data frames
  # (the original would collapse them to vectors and break nrow()).
  dat.tmp <- dat[cl == cluster, , drop = FALSE]
  keymat.tmp <- mat[cl == cluster, , drop = FALSE] > 0
  # Collect keywords row-by-row without growing a vector via c() in a loop
  # (O(n^2) in the original); seq_len() is also safe for zero-row clusters,
  # unlike 1:nrow().  Order of collected keywords is unchanged.
  keywords_in_cluster <- unlist(lapply(seq_len(nrow(keymat.tmp)),
                                       function(doc) wordlist[keymat.tmp[doc, ]]))
  print(paste("Cluster", cluster))
  tab <- table(keywords_in_cluster)
  tab <- tab[tab > 2]       # keywords seen in 3+ documents
  tab <- tab[order(-tab)]   # most frequent first
  tab2 <- data.frame(Keyword = names(tab), Count = as.numeric(tab))
  # kable() must be print()ed to render inside a loop.
  print(knitr::kable(tab2, "pipe", caption = paste("Cluster", cluster)))
}
print(tab2)
# FIX: the transcript evaluated the bare name `ggpoairs`, which errors with
# "object 'ggpoairs' not found" -- a typo for GGally::ggpairs.  The failed
# line is kept commented; the help search below is the working lookup.
# ggpoairs
??ggpairs
library(cluster)
# Re-run of the clustering above: CLARA (k = 5) then PAM (k = 4) on `dd`.
# FIX: avoid binding the fit to `c`, which shadows base::c().
clara_fit <- clara(dd, k = 5)
clusplot(clara_fit)
p <- pam(dd, k = 4)
plot(p)
version                 # print R version information
c(-76401, 41586) / 8    # scratch arithmetic from the session
# Load effectsize (used later for eta-squared) and look up documentation.
library(effectsize)
?effectsize
# NOTE(review): this help lookup runs before glmnet is installed/loaded,
# so it can only succeed if glmnet was already available.
?glmnet
install.packages('glmnet')
# Lasso regression demo on mtcars: predict horsepower from four predictors,
# comparing an OLS baseline with a cross-validated glmnet fit.
#define response variable
y <- mtcars$hp
#define matrix of predictor variables
x <- data.matrix(mtcars[, c('mpg', 'wt', 'drat', 'qsec')])
# OLS baseline for comparison with the penalized fit.
lm1 <- lm(y~x)
lm1
library(glmnet)
#perform k-fold cross-validation to find optimal lambda value
cv_model <- cv.glmnet(x, y, alpha = 1)
#find optimal lambda value that minimizes test MSE
best_lambda <- cv_model$lambda.min
best_lambda
cv_model
# Refit the lasso (alpha = 1) at the CV-selected penalty and inspect the
# (possibly sparsified) coefficients.
best_model <- glmnet(x, y, alpha = 1, lambda = best_lambda)
coef(best_model)
summary(lm1)
knitr::opts_chunk$set(echo = TRUE)
# Load and prepare the manikin reaction-time data.
# FIX: in the transcript the aov() for lm2 was issued before `data` was
# read, failing with "object 'data' not found"; it is moved after the
# data-preparation steps here.
data <- read.csv("manikin.csv")
data <- data[data$block != "PRACTICE", ]   # drop practice trials
# Recode numeric -1/+1 codes to labels ("X" fills the unused 0 slot).
data$forback <- c("Backward", "X", "Forward")[data$forback + 2]
data$updown <- c("Down", "X", "Up")[data$updown + 2]
data$logRT <- log(data$rt)                 # log-transform RTs
library(car)
# Additive model of log RT on the four design factors.
lm1 <- lm(logRT ~ target + forback + updown + hand, data = data)
summary(lm1)
Anova(lm1)
# Interaction model originally attempted before the data were loaded.
lm2 <- aov(logRT ~ target + hand + forback * updown, data = data)
# Re-run of the manikin preparation, now followed by the interaction model
# fit both as aov() and as lm() to compare their summary output.
knitr::opts_chunk$set(echo = TRUE)
data <- read.csv("manikin.csv")
data <- data[data$block != "PRACTICE",]
# Recode -1/+1 numeric codes to labels ("X" is the unused 0 slot).
data$forback <- c("Backward","X","Forward")[data$forback+2]
data$updown <- c("Down","X","Up")[data$updown+2]
data$logRT <- log(data$rt)
library(car)
lm1 <-  lm(logRT~target+forback+updown+hand,data=data)
summary(lm1)
Anova(lm1)
# Same model via aov() (ANOVA-style summary) ...
lm2 <- aov(logRT~target + hand + forback*updown, data=data)
summary(lm2)
# ... and via lm() (coefficient-style summary); lm2 is overwritten.
lm2 <- lm(logRT~target + hand + forback*updown, data=data)
summary(lm2)
# Reload the data, then add subject as a fixed factor with polynomial
# contrasts and fit a larger interaction model.
data <- read.csv("manikin.csv")
data <- data[data$block != "PRACTICE",]
data$forback <- c("Backward","X","Forward")[data$forback+2]
data$updown <- c("Down","X","Up")[data$updown+2]
data$logRT <- log(data$rt)
library(car)
lm1 <-  lm(logRT~target+forback+updown+hand,data=data)
summary(lm1)
Anova(lm1)
##code these as a factor.
data$subnum <- factor(data$subnum)
# Polynomial contrasts across subjects (one per subject level).
contrasts(data$subnum) <- contr.poly(levels(data$subnum))
# Full interaction of hand/forback/updown plus block-by-subject effects.
lm4 <- aov(logRT~  target + hand * forback*updown+  block*subnum, data=data)
Anova(lm4,type="II")
# Effect sizes (eta-squared) for the aov fit.
effectsize::eta_squared(lm4)
# Re-read and re-prepare the data (same pipeline as above).
data <- read.csv("manikin.csv")
data <- data[data$block != "PRACTICE",]
data$forback <- c("Backward","X","Forward")[data$forback+2]
data$updown <- c("Down","X","Up")[data$updown+2]
data$logRT <- log(data$rt)
# NOTE(review): lm4 was fit on the *previous* `data` object; this Anova
# summarises that earlier fit, not a model of the freshly read data.
Anova(lm4,type="II")
# Trials per subject, and the number of distinct subjects.
table(data$subnum)
length(table(data$subnum))
# Repeated-measures ANOVAs with subject Error() strata, progressively
# simplifying the within-subject error term.
# FIX: the transcript's first summary() referenced `aov.error1` before it
# existed; it is corrected to summarise the model just fit.  T/F are
# spelled out as TRUE/FALSE (T and F are reassignable).
aov.error <- aov(logRT ~ target + forback * updown + block +
                   Error(subnum / (forback * updown + block + target)),
                 data = data)
summary(aov.error)
aov.error1 <- aov(logRT ~ target + forback * updown + block +
                    Error(subnum / (forback * updown + block + target)),
                  data = data)
summary(aov.error1)
summary(aov.error1, test = FALSE)
# Simpler within-subject error term: only forback*updown nested in subject.
aov.error1 <- aov(logRT ~ forback * updown + block + target +
                    Error(subnum / (forback * updown)), data = data)
summary(aov.error1, test = FALSE)
aov.error1 <- aov(logRT ~ forback * updown + target +
                    Error(subnum / (forback * updown)), data = data)
summary(aov.error1, test = FALSE)
aov.error1 <- aov(logRT ~ forback * updown +
                    Error(subnum / (forback * updown)), data = data)
summary(aov.error1, test = FALSE)
# NOTE: anova()/car::Anova() do not support aovlist objects (they errored in
# the transcript); summary() is the supported accessor, so those calls are
# omitted here.
# Subject must be a factor for the Error() stratification to be meaningful:
data$subnum <- as.factor(data$subnum)
aov.error1 <- aov(logRT ~ forback * updown +
                    Error(subnum / (forback * updown)), data = data)
summary(aov.error1, test = FALSE)
# Consolidated manikin analysis: data prep, additive and interaction models,
# cell-means plot, trial-effect check, and effect sizes.
knitr::opts_chunk$set(echo = TRUE)
data <- read.csv("manikin.csv")
data <- data[data$block != "PRACTICE",]
data$forback <- c("Backward","X","Forward")[data$forback+2]
data$updown <- c("Down","X","Up")[data$updown+2]
data$logRT <- log(data$rt)
library(car)
lm1 <-  lm(logRT~target+forback+updown+hand,data=data)
summary(lm1)
Anova(lm1)
# Interaction model and Type-II vs Type-III sums of squares.
lm2 <- lm(logRT~target + hand + forback*updown, data=data)
summary(lm2)
Anova(lm2,type="II")
Anova(lm2,type="III")
# Geometric-mean RT per forback x updown cell (exp of mean log RT).
tab <- aggregate(data$logRT,list(forback=data$forback,updown=data$updown),function(x){exp(mean(x))})
library(ggplot2)
tab |> ggplot(aes(x=(forback),color=(updown),y=x, group=updown)) + geom_point() + geom_line() + theme_bw()
# Quick look at practice/fatigue effects over trials.
plot(data$trial,data$logRT)
# Add block as a covariate alongside the hand x forback x updown interaction.
lm3 <- aov(logRT~target + hand *forback*updown+block, data=data)
Anova(lm3,type="II")
Anova(lm3,type="III")
library(sjstats)
effectsize::eta_squared(lm3)
##code these as a factor.
data$subnum <- factor(data$subnum)
# Polynomial contrasts across subject levels.
contrasts(data$subnum) <- contr.poly(levels(data$subnum))
# Subject as a fixed factor interacting with block.
lm4 <- aov(logRT~  target + hand * forback*updown+  block*subnum, data=data)
Anova(lm4,type="II")
effectsize::eta_squared(lm4)
# Switch to repeated-measures style: subject as an Error() stratum.
data$subnum <- as.factor(data$subnum)
aov.error1 <- aov(logRT~  forback*updown+  Error(subnum/(forback*updown)), data=data)
summary(aov.error1, test=F)
data$subnum <- as.factor(data$subnum)
# Add target to both the fixed part and the within-subject error term;
# the two fits below differ only in term order.
aov.error1 <- aov(logRT~  forback*updown+ target+ Error(subnum/(forback*updown+target)), data=data)
summary(aov.error1, test=F)
aov.error2 <- aov(logRT~  target+ forback*updown+Error(subnum/(forback*updown+target)), data=data)
summary(aov.error2, test=F)
# Cross-check the repeated-measures results with ez::ezANOVA.
# NOTE(review): if ez is not installed, the first library() errors; the
# install.packages() below was run after that failure, then library() again.
library(ez)
install.packages("ez")
library(ez)
# First attempt; ezANOVA wants the within factors as factors.
ezANOVA(data=data,dv=logRT,wid=subnum,within=.(updown,target,forback))
data$forback <- as.factor(data$forback)
data$updown <- as.factor(data$updown)
data$target <- as.factor(data$target)
ezANOVA(data=data,dv=logRT,wid=subnum,within=.(updown,target,forback))
?ezANOVA
# return_aov=TRUE also returns the underlying aov object.
x <- ezANOVA(data=data,dv=logRT,wid=subnum,within=.(updown,target,forback),return_aov=TRUE)
print(x)
# Equivalent aov() formulation with the full three-way within term.
data$subnum <- as.factor(data$subnum)
aov.error1 <- aov(logRT~  forback*updown*target+ Error(subnum/(forback*updown*target)), data=data)
summary(aov.error1, test=F)
x
summary(x)
# Simulate a within-subject design: 20 subjects x 3 conditions x 5 reps,
# with a random per-subject intercept and fixed condition effects.
# NOTE(review): results depend on the exact order of RNG calls (runif then
# rnorm); do not reorder these statements.
set.seed(500)
x1 <- factor(rep(c("a","b","c"),20*5))
subj <- factor(rep(1:20,each=3*5))
# 200 baseline + subject offset (runif indexed by subject) + condition
# effect (-50/0/+100 indexed by factor level) + trial noise.
rt <- 200 + runif(20)[subj]*100+ c(-50,0,100)[x1] + rnorm(300) * 30
boxplot(rt~x1)
points(as.numeric(x1),rt,cex=2,col="grey")
# Expand to 5 observations per trial with fresh noise; built twice, the
# second time after set.seed(100) for reproducibility.
data2 <- data.frame(subj=rep(subj,each=5),
cond = rep(x1,each=5),
rt = rep(rt,each=5) + rnorm(length(rt)*5))
set.seed(100)
data2 <- data.frame(subj=rep(subj,each=5),
cond = rep(x1,each=5),
rt = rep(rt,each=5) + rnorm(length(rt)*5))
data2[1:10,]
## Compare the aggregated vs non-aggregated versions of the within-subject
## ANOVA.
## FIX: in the transcript, aov2.b was fit twice before `data2b` existed
## (failing with "object 'data2b' not found"); the aggregation is performed
## first here, then each model is fit exactly once.
data2b <- aggregate(data2$rt, list(subj = data2$subj, cond = data2$cond), mean)
data2b$rt <- data2b$x   # aggregate() names the value column "x"
aov2.a <- aov(rt ~ cond + Error(subj / cond), data = data2)
aov2.b <- aov(rt ~ cond + Error(subj / cond), data = data2b)
summary(aov2.a)
summary(aov2.b)
# Add a between-subject group factor to the simulated design.
# FIX: the transcript fit model3 before `data3` existed ("object 'data3'
# not found") and then built data3 twice; data3 is constructed once here,
# with `group` created as a factor directly (the transcript's corrected
# second attempt).
data3 <- data.frame(subj = as.factor(c(subj, as.numeric(subj) + 20)),
                    group = as.factor(rep(c("Control", "Experimental"), each = 300)),
                    cond = rep(x1, 2),
                    rt = rep(rt, 2) + rnorm(600))
head(data3)
#defining the model is as follows: because group is a between-subject factor, it
#is not nested within the Error() notation, but cond is because it is a within-subject
#variable.
model3 <- aov(rt ~ group + cond + Error(subj / (cond)), data = data3)
summary(model3)
# Scratch calculations: divide two totals by 8, then a binomial standard
# deviation for n = 80, p = 0.7 (absolute, and as a proportion of n).
print(c(3500, 6000) / 8)
binom_sd <- sqrt(80 * 0.7 * 0.3)
print(binom_sd)
print(binom_sd / 80)
# NOTE(review): setwd() with a machine-specific path is fragile; the
# read.csv() calls later in this session depend on this working directory.
setwd("~/Dropbox/courses/5220-s2025b/web-5220/psy5220/daily/Day13")
knitr::opts_chunk$set(echo = TRUE)
# NOTE(review): `vals2` is not defined until later in this transcript, so
# this line failed when first run ("object 'vals2' not found").
tmpdat <- data.frame(values=as.numeric(dist(vals2,method="manhattan")))
library(knitr)
library(rmdformats)
## Global options
options(max.print="75")
# Standard knitr chunk options for the rendered document.
opts_chunk$set(echo=TRUE,
cache=TRUE,
prompt=FALSE,
tidy=TRUE,
comment=NA,
message=FALSE,
warning=FALSE)
opts_knit$set(width=75)
# Demonstration of how scaling and distance metrics change pairwise
# distances on a synthetic salary/gender/party data set.
knitr::opts_chunk$set(echo = TRUE)
set.seed(100)
library(ggplot2)
vals <- data.frame(salary =sample(c(30000,50000,100000),50,replace=T),
gender=sample(1:2,50,replace=T),
party = sample(1:2,50,replace=T))
# Raw Euclidean distances: dominated entirely by the salary scale.
tmpdat <- data.frame(values=as.numeric(dist(vals)))
tmpdat |>
ggplot(aes(x=values)) + geom_histogram() + theme_bw()
table(tmpdat)
# z-score standardization puts all three variables on comparable scales.
vals2 <- scale(vals)
tmpdat <- data.frame(values=as.numeric(dist(vals2)))
tmpdat |>    ggplot(aes(x=values)) + geom_histogram() + theme_bw()
# Alternative: min-max rescale salary to [0, 1] instead of z-scoring.
vals2 <- vals
vals2$salary <- (vals2$salary-min(vals2$salary)) / (max(vals2$salary)-min(vals2$salary))
tmpdat <- data.frame(values=as.numeric(dist(vals2)))
tmpdat |>    ggplot(aes(x=values)) + geom_histogram() + theme_bw()
# Weighted columns via t() trickery.  NOTE(review): the weights are
# c(.33,.333,.33) -- the middle .333 looks like a typo; confirm whether
# equal weights were intended.
vals3 <- t((t(vals2) * c(.33,.333,.33)))
tmpdat <- data.frame(values=as.numeric(dist(vals3)))
tmpdat |>    ggplot(aes(x=values)) + geom_histogram() + theme_bw()
# Manhattan and extreme Minkowski distances (p large ~ max coordinate;
# p small emphasizes the min) on the min-max scaled data.
tmpdat <- data.frame(values=as.numeric(dist(vals2,method="manhattan")))
tmpdat |>    ggplot(aes(x=values)) + geom_histogram() + theme_bw() + ggtitle("Manhattan distance")
tmpdat <- data.frame(values=as.numeric(dist(vals2,method="minkowski",p=100)))
tmpdat |>    ggplot(aes(x=values)) + geom_histogram() + theme_bw() + ggtitle("Minkowski distance p= 100 (max)")
tmpdat <- data.frame(values=as.numeric(dist(vals2,method="minkowski",p=1/100)))
tmpdat |>    ggplot(aes(x=values)) + geom_histogram() + theme_bw() + ggtitle("Minkowski distance p= 1/100 (min)")
# Naive Bayes classification of smartphone ownership (klaR::NaiveBayes),
# with a look at the per-class Gaussian densities for Age.
dat <- read.csv("data_study1.csv")
dat$Smartphone <- factor(dat$Smartphone)
library(klaR)
nb <- NaiveBayes(Smartphone~.,data=dat)
library(DAAG)
summary(nb)
# Class priors and per-feature conditional distributions.
nb$apriori
nb$tables
# Reproduce the Age class-conditional means/SDs by hand.
means <- tapply(as.numeric(dat$Age),list(dat$Smartphone),mean)
sds <- tapply(as.numeric(dat$Age),list(dat$Smartphone),sd)
means
sds
library(ggplot2)
# Plot the two Gaussian age densities the classifier assumes.
phone <- rep(c("iphone","android"),each=100)
density<-c(dnorm(1:100,mean=means[2],sd=sds[2]),
dnorm(1:100,mean=means[1],sd=sds[1]))
df <- data.frame(age=c(1:100,1:100),
phone,density)
# NOTE(review): geom_line(size=) is deprecated in newer ggplot2 in favor of
# linewidth= -- works but warns.
ggplot(df,aes(x=age,y=density,colour=phone)) + geom_line(size=2) + theme_bw() + scale_color_manual(values=c("orange2","navy"))
# Training-set confusion matrix (DAAG::confusion).
pred <- predict(nb)
confusion(dat$Smartphone,pred$class)
# Kernel-density (nonparametric) variant of the class-conditionals.
nb2 <- NaiveBayes(Smartphone~.,usekernel=T,data=dat)
nb2$tables
plot(nb2, col=c("navy","orange2"), lwd=1.5)
?plot.NaiveBayes
pred2 <- predict(nb2)
confusion(dat$Smartphone,pred2$class)
# Naive Bayes on the dengue data, comparing klaR::NaiveBayes and
# e1071::naiveBayes and their handling of missing values.
library(e1071)
# NOTE(review): the dengue data set ships with DAAG (loaded earlier), not
# e1071 -- data() finds it via the search path.
data(dengue)
summary(dengue)
dengue$NoYes <- as.factor(dengue$NoYes)
#This doesn't work
#nb.dengue <- NaiveBayes(NoYes~.,data=dengue)
nb.dengue <- NaiveBayes(NoYes~.,data=dengue,na.action="na.omit") ##this works
#This one works with the klaR NaiveBayes:
nb.dengue2 <- NaiveBayes(NoYes~ h10pix+Xmin+Ymin,data=dengue)
#this one works--from e1071 package
nb.dengue3<- naiveBayes(NoYes~.,data=dengue)
#when we remove the na, we need to remove it from the ground truth too:
confusion(dengue$NoYes[!is.na(rowMeans(dengue[,-9]))],predict(nb.dengue)$class)
confusion(dengue$NoYes,predict(nb.dengue2)$class)
confusion(dengue$NoYes,predict(nb.dengue3,newdata=dengue))
# Repeat of the same three fits, with nb.dengue2 namespaced explicitly to
# klaR (lowercase naiveBayes below is the e1071 version).
#This doesn't work
#nb.dengue <- NaiveBayes(NoYes~.,data=dengue)
nb.dengue <- NaiveBayes(NoYes~.,data=dengue,na.action="na.omit") ##this works
#This one works with the klaR NaiveBayes:
nb.dengue2 <- klaR::NaiveBayes(NoYes~ h10pix+Xmin+Ymin,data=dengue)
#this one works--from e1071 package
nb.dengue3<- naiveBayes(NoYes~.,data=dengue)
#when we remove the na, we need to remove it from the ground truth too:
confusion(dengue$NoYes[!is.na(rowMeans(dengue[,-9]))],predict(nb.dengue)$class)
confusion(dengue$NoYes,predict(nb.dengue2)$class)
confusion(dengue$NoYes,predict(nb.dengue3,newdata=dengue))
# Kernel Naive Bayes on a two-class MNIST subset (250 examples per digit).
library(klaR)
library(DAAG)
train <-read.csv("trainmnist.csv")
test <- read.csv("testmnist.csv")
set.seed(101)
##smooth out the training set a bit by adding some noise, so no pixel
##has a sd of 0.
for(i in 1:ncol(train))
{
train[,i] <- rnorm(500,as.numeric(train[,i]),.1)
}
# Visualize a few training digits and the per-class average images.
par(mfrow=c(2,4))
image(matrix(unlist(train[1,]),nrow=28))
image(matrix(unlist(train[2,]),nrow=28))
image(matrix(unlist(train[3,]),nrow=28))
image(matrix(colMeans(train[1:250,]),nrow=28), main="Average of 250 prototypes")
image(matrix(unlist(train[251,]),nrow=28))
image(matrix(unlist(train[252,]),nrow=28))
image(matrix(unlist(train[253,]),nrow=28))
image(matrix(colMeans(train[251:500,]),nrow=28), main="Average of 250 prototypes")
# Rows 1-250 are one class, 251-500 the other.
train$labels <- as.factor(rep(0:1,each=250))
nb3 <- NaiveBayes(labels~.,usekernel=T,data=train)
p3a <- predict(nb3) ##this takes a while
confusion(train$labels,p3a$class) ##almost perfect
p3b<- predict(nb3,test) ##equally good
?klaR::NaiveBayes
confusion(rep(0:1,each=250),p3b$class)
# Degrade the test set with stronger noise (sd = .25 vs .1 for training)
# and check whether classification survives.
set.seed(200)
for(i in 1:ncol(test))
{
test[,i] <- rnorm(500,as.numeric(test[,i]),.25)
}
# Visualize noisy test digits and per-class averages.
par(mfrow=c(2,4))
image(matrix(unlist(test[1,]),nrow=28))
image(matrix(unlist(test[2,]),nrow=28))
image(matrix(unlist(test[3,]),nrow=28))
image(matrix(colMeans(test[1:250,]),nrow=28), main="Average of 250 prototypes")
image(matrix(unlist(test[251,]),nrow=28))
image(matrix(unlist(test[252,]),nrow=28))
image(matrix(unlist(test[253,]),nrow=28))
image(matrix(colMeans(test[251:500,]),nrow=28), main="Average of 250 prototypes")
p3c<- predict(nb3,test) ##This one is still pretty good
confusion(rep(0:1,each=250),p3c$class)
# FIX: premature line from the transcript -- `joint` is not defined until
# the data are read below, so this call failed with "object 'joint' not
# found".  Kept commented for the record; the working fit appears later.
# s1.pol <-  svm(y=joint$eng,x=joint[,-1],kernel="polynomial",scale=T,cost=100)
library(knitr)
library(rmdformats)
## Global options
options(max.print="75")
opts_chunk$set(echo=TRUE,
cache=TRUE,
prompt=FALSE,
tidy=TRUE,
comment=NA,
message=FALSE,
warning=FALSE)
opts_knit$set(width=75)
library(e1071)
library(DAAG)
# Engagement data: the first column is a row index, drop it.
joint <- read.csv("eng-joint.csv")[,-1]
joint$eng <- as.factor(joint$eng)
# Linear SVM with 5-fold cross-validation.
# FIX: the transcript's first two attempts used `joint0.697 [,-1]` -- a
# paste artifact (the 0.697 CV accuracy pasted into the command) that
# errored with "object 'joint0.697' not found".  Only the corrected call
# is kept.
s1 <- svm(y = joint$eng, x = joint[, -1], scale = TRUE, kernel = "linear", cross = 5)
# Inspect the linear SVM: primal coefficients, summary, and training-set
# confusion matrix.
coef(s1)
summary(s1)
# NOTE(review): predict.svm has no `data` argument; `data=joint` is likely
# absorbed by `...`, so this presumably returns fitted values on the
# training data -- confirm against ?predict.svm (newdata= is the intended
# argument).
confusion(joint$eng,predict(s1,data=joint))
# Polynomial-kernel SVM, default cost ...
s1.pol <-  svm(y=joint$eng,x=joint[,-1],kernel="polynomial",scale=T)
s1.pol
confusion(joint$eng,(predict(s1.pol)))
# ... and with a much larger cost (harder margin, more overfitting risk).
s1.pol <-  svm(y=joint$eng,x=joint[,-1],kernel="polynomial",scale=T,cost=100)
s1.pol
confusion(joint$eng,(predict(s1.pol)))
# Non-linearly-separable 2-D demo data (ex8a): a linear SVM should do
# poorly here.
ex4 <- read.csv("ex8a.csv",header=F)
ex4$V1 <- as.factor(ex4$V1)
ggplot(ex4,aes(x=V2,y=V3,colour=V1))+geom_point(size=3)  +theme_minimal() + scale_color_manual(values=c("orange","navy"))
#set.seed(100)
seed <- 20 ;
set.seed(seed)
# Linear kernel, 10-fold CV, cost = 10.
svm.ng <- svm(V1~.,data=ex4[,1:3],kernel='linear',scale=F,cross=10,cost=10,fitted=T)
svm.ng
confusion(ex4$V1,predict(svm.ng))
# Color points by (truth, prediction) pairs to show the errors spatially.
ex4$class <- predict(svm.ng)
ex4$class2 <- paste(ex4$V1,ex4$class)
ggplot(ex4,aes(x=V2,y=V3,colour=class2,shape=class))+geom_point(size=3)  +theme_minimal() + scale_color_manual(values=c("darkgreen","orange","red","blue"))
coef(svm.ng)
# Radial (RBF) kernel handles the non-linear boundary; cost = 100.
svm.ng <- svm(V1~.,data=ex4[,1:3],kernel='radial',scale=F,cross=10,cost=100,fitted=T)
svm.ng
confusion(ex4$V1,predict(svm.ng))
ex4$class <- predict(svm.ng)
ex4$class2 <- paste(ex4$V1,ex4$class)
ggplot(ex4,aes(x=V2,y=V3,colour=class2,shape=class))+geom_point(size=3) +theme_minimal() +  scale_color_manual(values=c("darkgreen","orange","red","blue"))
# nu-classification variant with a very narrow RBF (gamma = 100).
svm.ng <- svm(V1~.,data=ex4[,1:3],type="nu-classification",kernel='radial',scale=F,fitted=T,   gamma=100)
svm.ng
confusion(ex4$V1,predict(svm.ng))
ex4$class <- predict(svm.ng)
ex4$class2 <- paste(ex4$V1,ex4$class)
ggplot(ex4,aes(x=V2,y=V3,colour=class2,shape=class))+geom_point(size=3) +theme_minimal() +  scale_color_manual(values=c("darkgreen","orange","red","blue"))
# Jitter the features slightly and refit: tests sensitivity of the narrow
# RBF solution to small perturbations (cross=100 here is leave-ish-one-out
# style CV; small cost = softer margin).
ex4b <- ex4
ex4b$V2 <- ex4b$V2 + rnorm(nrow(ex4),mean=0,sd=.05)
ex4b$V3 <- ex4b$V3 + rnorm(nrow(ex4),mean=0,sd=.05)
svm.ng <- svm(V1~.,data=ex4b[,1:3],kernel='radial',scale=T,cross=100,gamma=100,cost=.5)
svm.ng
table(ex4b$V1,predict(svm.ng))
ex4b$class <- predict(svm.ng)
ex4b$class2 <- paste(ex4b$V1,ex4b$class)
ggplot(ex4b,aes(x=V2,y=V3,colour=class2,shape=class))+geom_point(size=3)  +theme_minimal()   +  scale_color_manual(values=c("darkgreen","orange","red","blue"))
