####################################
##### Color Perception Network #####
####################################

setwd("PATHTODATA")
library(neuralnet)

##### Generate Data
# set.seed(100)
#
# dat <- data.frame(R = sample(255,500,replace=T),
#                   G = sample(255,500,replace=T),
#                   B = sample(255,500,replace=T))
#
# write.table(dat,file="colors.txt",row.names=F,quote=F)

##### Read Data
cols <- read.table("colors.txt",header=T,nrows=250)
cols$label <- as.character(cols$label)

## re-format: add one 0/1 indicator column per color label (one-hot coding)
cols[,unique(cols$label)] <- 0
for(i in 1:nrow(cols)){
  cols[i,cols[i,"label"]] <- 1
}

##### Check data
table(cols$label)
### any comments on the training set?

##### Define functions to plot colors

## plot the color stored in row n of dat as one large filled circle
plot.col <- function(dat,n){
  plot(c(1,1),type="n",axes=F,xlab="",ylab="",ylim=c(0,2),xlim=c(0,2))
  row.n <- n
  row <- dat[row.n,]
  points(x=1,y=1,cex=35,pch=19,col=rgb(row["R"],row["G"],row["B"],maxColorValue = 255))
}
plot.col(cols,1)

## plot a color given directly as R, G, B values
plot.rgb <- function(R,G,B){
  plot(c(1,1),type="n",axes=F,xlab="",ylab="",ylim=c(0,2),xlim=c(0,2))
  points(x=1,y=1,cex=35,pch=19,col=rgb(R,G,B,maxColorValue = 255))
}
plot.rgb(255,0,0)

##### Train a network: Predict labels with RGB values
library(neuralnet)

## build the model formula as a string: label columns ~ R + G + B
f1 <- paste(unique(cols$label),"+ ",collapse="")
f1 <- gsub(f1,pattern=" \\+ $",replacement="")
formula <- paste(f1,"~ R + G + B")
formula

## training: TRAIN A NEURAL NETWORK WITH NO HIDDEN LAYER
## (one possible neuralnet() call is sketched below, after the hidden-layer notes)

## plot
plot(nn)
plot(nn,intercept = F)

##### Try to predict some color labels

### random colors
set.seed(100)
testset <- data.frame(R = sample(255,10,replace=T),
                      G = sample(255,10,replace=T),
                      B = sample(255,10,replace=T))

predicted <- predict(nn,testset)
colnames(predicted) <- nn$model.list$response
predicted
plot.col(testset,3)

### focal colors
testset2 <- data.frame(R = 0, G = 0, B = 0)
testset2[1,]  <- c(255,0,0)
testset2[2,]  <- c(0,255,0)
testset2[3,]  <- c(0,0,255)
testset2[4,]  <- c(255,255,0)
testset2[5,]  <- c(0,255,255)
testset2[6,]  <- c(255,0,255)
testset2[7,]  <- c(0,0,0)
testset2[8,]  <- c(255,255,255)
testset2[9,]  <- c(255,120,0)
testset2[10,] <- c(139,69,19)
testset2[11,] <- c(169,169,169)

predicted2 <- predict(nn,testset2)
colnames(predicted2) <- nn$model.list$response
predicted2
plot.col(testset2,11)

##### Inspect the weights
weights <- round(nn$weights[[1]][[1]],5)
rownames(weights) <- c("bias",nn$model.list$variables)
colnames(weights) <- c(nn$model.list$response)
weights
## alternative: nn$result.matrix

##################################################
##### Another training with one hidden layer #####
##################################################

## training: TRAIN A NEURALNET WITH ONE HIDDEN LAYER (3 NODES)
## (note: if the model doesn't converge, adjust the training parameters - which ones?)
## (note2: sometimes the training just stagnates. this happens. just run it again.)
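## --------------------------------------------------------------------------
## Possible training calls (a minimal sketch, for reference only): the two
## "training" tasks above are deliberately left open. One way to fill them in
## with neuralnet() is shown here; the settings (hidden, stepmax, and
## linear.output = FALSE for the 0/1 label columns) are assumptions, not the
## only valid choices. To run the script top to bottom, move each call to its
## "## training" marker above.

## no hidden layer: a single weight matrix maps R, G, B directly to the labels
nn <- neuralnet(as.formula(formula), data = cols,
                hidden = 0, linear.output = FALSE)

## one hidden layer with 3 nodes; if training does not converge, stepmax
## (maximum number of training steps) and threshold (stopping criterion)
## are the usual parameters to adjust
nn2 <- neuralnet(as.formula(formula), data = cols,
                 hidden = 3, stepmax = 1e6, linear.output = FALSE)
## --------------------------------------------------------------------------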
plot(nn2,intercept=F)

##### model predictions: random colors
predicted <- compute(nn2,testset)$net.result
colnames(predicted) <- nn2$model.list$response
predicted
plot.col(testset,8)

##### model predictions: focal colors
predicted2 <- compute(nn2,testset2)$net.result
colnames(predicted2) <- nn2$model.list$response
predicted2
plot.col(testset2,11)

##### Inspect the weights

## input to hidden layer
weights2a <- round(nn2$weights[[1]][[1]],5)
rownames(weights2a) <- c("bias",nn2$model.list$variables)
colnames(weights2a) <- c("node1","node2","node3")
weights2a

## hidden layer to output
weights2b <- round(nn2$weights[[1]][[2]],5)
rownames(weights2b) <- c("bias","node1","node2","node3")
colnames(weights2b) <- c(nn2$model.list$response)
weights2b

## alternative: nn2$result.matrix

### Inspect the hidden layer
## neurons[[2]] holds the hidden-layer activations for each focal color
## (the first column is the constant bias input)
activations <- compute(nn2,testset2)$neurons[[2]]
activations <- round(activations,5)
activations
plot.col(testset2,8)
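
## --------------------------------------------------------------------------
## Optional extra (a small sketch, not part of the original exercise): read
## the output matrix as class predictions by taking, for each focal color,
## the output node with the highest activation and reporting its label.
predicted.labels <- colnames(predicted2)[apply(predicted2, 1, which.max)]
cbind(testset2, predicted.label = predicted.labels)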