阅读 65

神经网络模型+进阶

PART I 神经网络模型
  1. 模拟数据
# ---- Simulate a 3-class data set for multinomial classification ----
# Two independent standard-normal predictors (seeded for reproducibility).
set.seed(888)
x1 <- rnorm(1000,0)
set.seed(666)
x2 <- rnorm(1000,0)

# True (non-linear) log-odds of classes 2 and 3 relative to baseline class 1.
logit1 <- 2+3*x1+x1^2-4*x2
logit2 <- 1.5+2*x1-3*x2^2+x2

Denominator <- 1+exp(logit1)+exp(logit2) 
# Softmax denominator shared by the three class probabilities.
vProb <- cbind(1/Denominator,exp(logit1)/Denominator,exp(logit2)/Denominator) 
# 1000 x 3 matrix of class-membership probabilities (each row sums to 1).

mChoices <- t(apply(vProb,1,rmultinom,n=1,size=1)) 
# For each row, draw ONE class at random according to its probabilities
# (rmultinom with size = 1), giving a 1000 x 3 one-hot indicator matrix.
# Note: this samples a class; it does not simply pick the most probable one.
data <- cbind.data.frame(y=as.factor(apply(mChoices,1,function(x)which(x==1))),x1,x2) 
# Convert each one-hot row to a factor response y (levels "1","2","3")
# and combine it with the predictors x1 and x2 into one data frame.

str(data)
# 'data.frame': 1000 obs. of  3 variables:
#   $ y : Factor w/ 3 levels "1","2","3": 1 1 2 1 2 3 2 2 2 2 ...
# $ x1: num  -1.951 -1.544 0.73 -0.278 -1.656 ...
# $ x2: num  0.753 2.014 -0.355 2.028 -2.217 ...
  2. 查看模拟数据
library(ggplot2)
# Scatter plot of the two predictors, coloured by the simulated class label.
# qplot() was deprecated in ggplot2 3.4.0; this is the equivalent ggplot() call.
ggplot(data, aes(x = x1, y = x2, color = y)) +
  geom_point()
image.png
  3. 神经网络模型训练
library(nnet)

# 70/30 train/test split. The simulated rows are i.i.d. draws, so a
# positional split behaves like a random one here.
train <- data[1:700,]
test <- data[701:1000,]

# nnet() draws random starting weights, so fix the RNG seed to make the fit
# reproducible (the trace below was captured from one unseeded run).
set.seed(123)
# Single hidden layer with 6 units; softmax output for the 3-level factor y.
annmod <- nnet(y~x1+x2,train,size=6)
# # weights:  39
# initial  value 1106.979671 
# iter  10 value 325.827182
# iter  20 value 291.472800
# iter  30 value 284.906627
# iter  40 value 282.896526
# iter  50 value 281.619506
# iter  60 value 281.353716
# iter  70 value 280.478852
# iter  80 value 280.026634
# iter  90 value 279.878004
# iter 100 value 278.301294
# final  value 278.301294 
# stopped after 100 iterations

annmod
# a 2-6-3 network with 39 weights
# inputs: x1 x2 
# output(s): y 
# options were - softmax modelling
  4. 可视化训练结果
library(devtools)

# Pull in the plot.nnet() helper from a public gist. NOTE(review): this runs
# remote code at source time; the maintained successor of this helper is
# NeuralNetTools::plotnet() -- consider switching to it.
source_url('https://gist.github.com/fawda123/7471137/raw/cd6e6a0b0bdb4e065c597e52165e5ac887f5fe95/nnet_plot_update.r')
# Draw the network: pos.col/neg.col colour positive vs negative weights,
# alpha.val controls line transparency.
plot.nnet(annmod,alpha.val=0.5,pos.col ="green",neg.col="red")
image.png
  5. 神经网络模型结果评估
# Confusion matrix on the hold-out set (rows = observed y, cols = predicted).
pred <- predict(annmod, newdata = test[, -1], type = "class")
table(test[, 1], pred)
# pred
#    1   2   3
# 1  45  13   9
# 2   5 163  15
# 3  12  15  23
  6. 平均准确度(average accuracy)
# Average one-vs-rest accuracy of nnet models with 1..N hidden units.
#
# For each hidden-layer size x in 1:N, fits nnet(y ~ ., train, size = x),
# predicts on the global `test` set, and records the mean over the three
# classes of (TP + TN) / (TP + FP + FN + TN).
# Returns a numeric vector of length N.
#
# Fixes vs. the original version:
#  * the class-1 term was `tp1 + tn1/total` (missing parentheses); it is now
#    (tp1 + tn1)/total like the other two classes;
#  * when one class was never predicted, the missing column was appended at
#    the END of the confusion matrix regardless of which class was absent;
#    building the table with explicit factor levels keeps it 3x3 and in order;
#  * the result vector is preallocated instead of grown with c().
accuracyCal <- function(N){
  accuracy <- numeric(N)
  for (x in seq_len(N)){
    annmod <- nnet(y~.,data = train,size = x,trace = FALSE,maxit = 200)
    pred <- predict(annmod,test[,-1],type = "class")
    # Force predictions onto the observed factor levels -> always a 3x3 table.
    tab <- table(test[,1],factor(pred,levels = levels(test[,1])))
    perClass <- vapply(seq_len(nrow(tab)),function(k){
      tp <- tab[k,k]
      fn <- sum(tab[k,-k])   # observed k, predicted something else
      fp <- sum(tab[-k,k])   # predicted k, observed something else
      tn <- sum(tab[-k,-k])  # neither observed nor predicted as k
      (tp + tn)/(tp + fn + fp + tn)
    },numeric(1))
    accuracy[x] <- mean(perClass)
  }
  return(accuracy)
}

# Accuracy profile for hidden-layer sizes 1 through 30.
accuracySeri <- accuracyCal(30)

plot(
  accuracySeri,
  type = "b",
  xlab = "Number of units in the hidden layer.",
  ylab = "Average Accuracy"
)
image.png
  7. 与广义线性模型比较
# Baseline comparison: multinomial logistic regression (multinom() also lives
# in the nnet package) on the same training split.
model.lin <- multinom(y~.,train)
pred.lin <- predict(model.lin,test[,-1])
# NOTE: this assignment shadows base::table() for the rest of the session.
table <- table(test[,1],pred.lin)
table
# pred.lin
#    1   2   3
# 1  51  14   2
# 2  12 168   3
# 3  17  31   2

# One-vs-rest accuracy per class k: (TP + TN) / (TP + FP + FN + TN),
# then averaged over the three classes.
perClassAcc <- vapply(1:3, function(k) {
  tp <- table[k, k]
  fn <- sum(table[k, -k])
  fp <- sum(table[-k, k])
  tn <- sum(table[-k, -k])
  (tp + tn) / (tp + fn + fp + tn)
}, numeric(1))

accuracy <- mean(perClassAcc)

accuracy
# [1] 0.8244444
PART II 神经网络模型进阶
  1. 导入练习数据
# birthwt: risk factors for low infant birth weight, shipped with MASS.
# 189 births; `low` is the binary outcome, the rest are maternal covariates.
library(MASS)
data(birthwt)
str(birthwt)

# 'data.frame': 189 obs. of  10 variables:
#   $ low  : int  0 0 0 0 0 0 0 0 0 0 ...
# $ age  : int  19 33 20 21 18 21 22 17 29 26 ...
# $ lwt  : int  182 155 105 108 107 124 118 103 123 113 ...
# $ race : int  2 3 1 1 1 3 1 3 1 1 ...
# $ smoke: int  0 0 1 1 1 0 0 0 1 1 ...
# $ ptl  : int  0 0 0 0 0 0 0 0 0 0 ...
# $ ht   : int  0 0 0 0 0 0 0 0 0 0 ...
# $ ui   : int  1 0 0 1 1 0 0 0 0 0 ...
# $ ftv  : int  0 3 1 2 0 0 1 1 1 0 ...
# $ bwt  : int  2523 2551 2557 2594 2600 2622 2637 2637 2663 2665 ...
image.png
  2. 训练神经网络模型
library(neuralnet)

# neuralnet() initialises its weights at random, so fix the RNG seed to make
# the fitted network (and the plot below) reproducible across runs.
set.seed(1234)
# err.fct = "ce": cross-entropy error, suited to the 0/1 response `low`;
# linear.output = FALSE keeps the activation function on the output neuron.
nn <- neuralnet(low ~ age+lwt+race+smoke+ptl+ht+ui+ftv,data = birthwt,hidden = 2,err.fct = "ce",
                linear.output =FALSE)
plot(nn)
image.png

Error function 用于描述预测结果与观察结果的差别,差别越大说明模型越差。
开始时模型随机选取一个权重(随机模型),获得预测结果后与实际观测值比较,比较后再进行调整权重,如此反复直至获得最佳模型。

  3. Generalized weights: 各个变量对模型的贡献(重要程度)
# Smaller model (4 covariates) used to inspect generalized weights below.
# Seed the RNG so the random weight initialisation is reproducible.
set.seed(1234)
nn.limited <- neuralnet(
  low ~ age+lwt+race+smoke,
  data = birthwt,hidden = 4,err.fct = "ce",
  linear.output = FALSE)

plot(nn.limited)
image.png
# 2x2 grid of generalized-weight plots, one per covariate. Save the previous
# graphics settings and restore them afterwards so the 2x2 layout does not
# leak into later plots.
op <- par(mfrow = c(2,2))
gwplot(nn.limited,selected.covariate = "age")
gwplot(nn.limited,selected.covariate = "lwt")
gwplot(nn.limited,selected.covariate = "race")
gwplot(nn.limited,selected.covariate = "smoke")
par(op)

# On Windows this may fail with:
# Error in plot.window(...) : need finite 'ylim' values
image.png
  4. 模型预测
# Four hypothetical mothers; columns follow the model's covariate order:
# age, lwt, race, smoke.
new.mother <- rbind(
  c(23, 105, 3, 1),
  c(26, 111, 2, 0),
  c(31, 125, 2, 1),
  c(35, 136, 1, 0)
)
new.mother
#       [,1] [,2] [,3] [,4]
# [1,]   23  105    3    1
# [2,]   26  111    2    0
# [3,]   31  125    2    1
# [4,]   35  136    1    0

# NOTE(review): compute() is superseded by predict() in recent neuralnet
# releases, but still works. net.result holds the predicted probabilities.
pred <- compute(nn.limited,new.mother)
pred$net.result
#        [,1]
# [1,] 0.39809269
# [2,] 0.39809269
# [3,] 0.39809269
# [4,] 0.05554679

参考资料
章仲恒教授丁香园课程:《神经网络模型》《神经网络模型进阶》

作者:北欧森林

原文链接:https://www.jianshu.com/p/e44429eaaa5e

文章分类
后端
文章标签
版权声明:本站是系统测试站点,无实际运营。本文内容由互联网用户自发贡献,该文观点仅代表作者本人。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如发现本站有涉嫌抄袭侵权/违法违规的内容, 请发送邮件至 XXXXXXo@163.com 举报,一经查实,本站将立刻删除。
相关推荐