| Title: | Deep Learning Toolkit in R |
|---|---|
| Description: | Implements deep learning architectures and neural network algorithms, including BP, RBM, DBN, and deep autoencoders. |
| Authors: | Xiao Rong |
| Maintainer: | Xiao Rong <[email protected]> |
| License: | GPL |
| Version: | 0.2.1 |
| Built: | 2024-11-18 05:54:12 UTC |
| Source: | https://github.com/cran/deepnet |
Training a deep neural network with weights initialized by a DBN
```r
dbn.dnn.train(x, y, hidden = c(1), activationfun = "sigm",
  learningrate = 0.8, momentum = 0.5, learningrate_scale = 1,
  output = "sigm", numepochs = 3, batchsize = 100,
  hidden_dropout = 0, visible_dropout = 0, cd = 1)
```
| Argument | Description |
|---|---|
| x | matrix of x values for examples |
| y | vector or matrix of target values for examples |
| hidden | vector of numbers of units in the hidden layers. Default is c(1). |
| activationfun | activation function of the hidden units. Can be "sigm", "linear" or "tanh". Default is "sigm", the logistic function. |
| learningrate | learning rate for gradient descent. Default is 0.8. |
| momentum | momentum for gradient descent. Default is 0.5. |
| learningrate_scale | the learning rate is multiplied by this scale after every iteration. Default is 1. |
| numepochs | number of iterations over the samples. Default is 3. |
| batchsize | size of a mini-batch. Default is 100. |
| output | function of the output units; can be "sigm", "linear" or "softmax". Default is "sigm". |
| hidden_dropout | dropout fraction for the hidden layers. Default is 0. |
| visible_dropout | dropout fraction for the input layer. Default is 0. |
| cd | number of Gibbs sampling iterations in the CD algorithm. Default is 1. |
Xiao Rong
```r
Var1 <- c(rnorm(50, 1, 0.5), rnorm(50, -0.6, 0.2))
Var2 <- c(rnorm(50, -0.8, 0.2), rnorm(50, 2, 1))
x <- matrix(c(Var1, Var2), nrow = 100, ncol = 2)
y <- c(rep(1, 50), rep(0, 50))
dnn <- dbn.dnn.train(x, y, hidden = c(5, 5))

## predict by dnn
test_Var1 <- c(rnorm(50, 1, 0.5), rnorm(50, -0.6, 0.2))
test_Var2 <- c(rnorm(50, -0.8, 0.2), rnorm(50, 2, 1))
test_x <- matrix(c(test_Var1, test_Var2), nrow = 100, ncol = 2)
nn.test(dnn, test_x, y)
```
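The trained network can also be passed to nn.predict (documented below) to obtain raw class probabilities instead of an error rate. A minimal sketch, continuing from the example above:

```r
## continue from the example above: raw probabilities for the test set
prob <- nn.predict(dnn, test_x)
head(prob)  # one value per test row: the probability of class 1
```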
Load the MNIST dataset
```r
load.mnist(dir)
```
| Argument | Description |
|---|---|
| dir | directory containing the MNIST dataset |
The MNIST dataset, a list with the following components:

| Component | Description |
|---|---|
| train$n | number of training samples |
| train$x | pixel values of each training image |
| train$y | label of each training image |
| train$yy | one-of-c encoding of each training label |
| test$n | number of test samples |
| test$x | pixel values of each test image |
| test$y | label of each test image |
| test$yy | one-of-c encoding of each test label |
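A hedged usage sketch follows; the directory path is hypothetical, and both the division by 255 and the digit order of the one-of-c columns are assumptions not confirmed by this page:

```r
mnist <- load.mnist("path/to/mnist")  # hypothetical directory with the MNIST files
## train a small softmax classifier on the one-of-c encoded labels
nn <- nn.train(mnist$train$x / 255, mnist$train$yy,
               hidden = c(30), output = "softmax", numepochs = 1)
prob <- nn.predict(nn, mnist$test$x / 255)
pred <- max.col(prob) - 1             # assumes columns correspond to digits 0-9
mean(pred != mnist$test$y)            # test error rate
```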
Xiao Rong
Predict new samples by a trained NN
```r
nn.predict(nn, x)
```
| Argument | Description |
|---|---|
| nn | neural network trained by function nn.train |
| x | new samples to predict |
Returns the raw output value of the neural network. For a classification task, this is the probability of a class.
Xiao Rong
```r
Var1 <- c(rnorm(50, 1, 0.5), rnorm(50, -0.6, 0.2))
Var2 <- c(rnorm(50, -0.8, 0.2), rnorm(50, 2, 1))
x <- matrix(c(Var1, Var2), nrow = 100, ncol = 2)
y <- c(rep(1, 50), rep(0, 50))
nn <- nn.train(x, y, hidden = c(5))

## predict by nn
test_Var1 <- c(rnorm(50, 1, 0.5), rnorm(50, -0.6, 0.2))
test_Var2 <- c(rnorm(50, -0.8, 0.2), rnorm(50, 2, 1))
test_x <- matrix(c(test_Var1, test_Var2), nrow = 100, ncol = 2)
yy <- nn.predict(nn, test_x)
```
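Since the raw outputs are probabilities here, they can be turned into hard labels with a threshold; a minimal sketch continuing from the example above:

```r
## continue from the example above: threshold the probabilities
pred <- as.numeric(yy >= 0.5)  # 0.5 is the default threshold used by nn.test
table(pred, y)                 # confusion table against the true labels
```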
Test new samples by a trained NN; returns the error rate for classification
```r
nn.test(nn, x, y, t = 0.5)
```
| Argument | Description |
|---|---|
| nn | neural network trained by function nn.train |
| x | new samples to predict |
| y | labels of the new samples |
| t | threshold for classification. If the nn.predict value is >= t, the label is 1; otherwise 0. Default is 0.5. |
error rate
Xiao Rong
```r
Var1 <- c(rnorm(50, 1, 0.5), rnorm(50, -0.6, 0.2))
Var2 <- c(rnorm(50, -0.8, 0.2), rnorm(50, 2, 1))
x <- matrix(c(Var1, Var2), nrow = 100, ncol = 2)
y <- c(rep(1, 50), rep(0, 50))
nn <- nn.train(x, y, hidden = c(5))

test_Var1 <- c(rnorm(50, 1, 0.5), rnorm(50, -0.6, 0.2))
test_Var2 <- c(rnorm(50, -0.8, 0.2), rnorm(50, 2, 1))
test_x <- matrix(c(test_Var1, test_Var2), nrow = 100, ncol = 2)
err <- nn.test(nn, test_x, y)
```
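For intuition about the threshold t, the error returned above can be reproduced by hand with nn.predict; a minimal sketch continuing from the example above:

```r
## continue from the example above: reproduce nn.test manually
prob <- nn.predict(nn, test_x)
pred <- as.numeric(prob >= 0.5)  # the default threshold t = 0.5
mean(pred != y)                  # equals err from nn.test above
```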
Training a neural network with single or multiple hidden layers by BP
```r
nn.train(x, y, initW = NULL, initB = NULL, hidden = c(10),
  activationfun = "sigm", learningrate = 0.8, momentum = 0.5,
  learningrate_scale = 1, output = "sigm", numepochs = 3,
  batchsize = 100, hidden_dropout = 0, visible_dropout = 0)
```
| Argument | Description |
|---|---|
| x | matrix of x values for examples |
| y | vector or matrix of target values for examples |
| initW | initial weights. If missing, chosen at random (see the warm-start sketch after this list). |
| initB | initial biases. If missing, chosen at random. |
| hidden | vector of numbers of units in the hidden layers. Default is c(10). |
| activationfun | activation function of the hidden units. Can be "sigm", "linear" or "tanh". Default is "sigm", the logistic function. |
| learningrate | learning rate for gradient descent. Default is 0.8. |
| momentum | momentum for gradient descent. Default is 0.5. |
| learningrate_scale | the learning rate is multiplied by this scale after every iteration. Default is 1. |
| numepochs | number of iterations over the samples. Default is 3. |
| batchsize | size of a mini-batch. Default is 100. |
| output | function of the output units; can be "sigm", "linear" or "softmax". Default is "sigm". |
| hidden_dropout | dropout fraction for the hidden layers. Default is 0. |
| visible_dropout | dropout fraction for the input layer. Default is 0. |
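A hedged warm-start sketch for initW and initB: it assumes the object returned by nn.train exposes its learned weights and biases as components W and B, which this page does not document, so treat those names as assumptions:

```r
## toy data, built like the example below
x <- matrix(rnorm(200), nrow = 100, ncol = 2)
y <- c(rep(1, 50), rep(0, 50))
nn1 <- nn.train(x, y, hidden = c(5), numepochs = 3)
## continue training from the learned parameters
## (the nn1$W / nn1$B component names are assumed, not documented here)
nn2 <- nn.train(x, y, initW = nn1$W, initB = nn1$B,
                hidden = c(5), numepochs = 3)
```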
Xiao Rong
```r
Var1 <- c(rnorm(50, 1, 0.5), rnorm(50, -0.6, 0.2))
Var2 <- c(rnorm(50, -0.8, 0.2), rnorm(50, 2, 1))
x <- matrix(c(Var1, Var2), nrow = 100, ncol = 2)
y <- c(rep(1, 50), rep(0, 50))
nn <- nn.train(x, y, hidden = c(5))
```
Generate a visible vector from hidden unit states
```r
rbm.down(rbm, h)
```
| Argument | Description |
|---|---|
| rbm | an RBM object trained by function rbm.train |
| h | hidden unit states |
generated visible vector
Xiao Rong
```r
Var1 <- c(rep(1, 50), rep(0, 50))
Var2 <- c(rep(0, 50), rep(1, 50))
x3 <- matrix(c(Var1, Var2), nrow = 100, ncol = 2)
r1 <- rbm.train(x3, 3, numepochs = 20, cd = 10)
h <- c(0.2, 0.8, 0.1)
v <- rbm.down(r1, h)
```
Training an RBM (Restricted Boltzmann Machine)
```r
rbm.train(x, hidden, numepochs = 3, batchsize = 100,
  learningrate = 0.8, learningrate_scale = 1, momentum = 0.5,
  visible_type = "bin", hidden_type = "bin", cd = 1)
```
| Argument | Description |
|---|---|
| x | matrix of x values for examples |
| hidden | number of hidden units |
| visible_type | activation function of the input units. Only "sigm" is supported now. |
| hidden_type | activation function of the hidden units. Only "sigm" is supported now. |
| learningrate | learning rate for gradient descent. Default is 0.8. |
| momentum | momentum for gradient descent. Default is 0.5. |
| learningrate_scale | the learning rate is multiplied by this scale after every iteration. Default is 1. |
| numepochs | number of iterations over the samples. Default is 3. |
| batchsize | size of a mini-batch. Default is 100. |
| cd | number of Gibbs sampling iterations in the CD algorithm. Default is 1. |
Xiao Rong
```r
Var1 <- c(rep(1, 50), rep(0, 50))
Var2 <- c(rep(0, 50), rep(1, 50))
x3 <- matrix(c(Var1, Var2), nrow = 100, ncol = 2)
r1 <- rbm.train(x3, 10, numepochs = 20, cd = 10)
```
Infer hidden unit states from visible units
```r
rbm.up(rbm, v)
```
| Argument | Description |
|---|---|
| rbm | an RBM object trained by function rbm.train |
| v | visible unit states |
The hidden unit states.
Xiao Rong
```r
Var1 <- c(rep(1, 50), rep(0, 50))
Var2 <- c(rep(0, 50), rep(1, 50))
x3 <- matrix(c(Var1, Var2), nrow = 100, ncol = 2)
r1 <- rbm.train(x3, 3, numepochs = 20, cd = 10)
v <- c(0.2, 0.8)
h <- rbm.up(r1, v)
```
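Combining rbm.up and rbm.down gives a one-step reconstruction of a visible vector, a quick sanity check on what the RBM has learned. A minimal sketch continuing from the example above:

```r
## continue from the example above: one up-down pass
h <- rbm.up(r1, v)        # infer hidden activations from the visible vector
v_rec <- rbm.down(r1, h)  # map them back to visible space
v_rec                     # a well-trained RBM should roughly reproduce v
```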
Training a deep neural network with weights initialized by a stacked autoencoder
```r
sae.dnn.train(x, y, hidden = c(1), activationfun = "sigm",
  learningrate = 0.8, momentum = 0.5, learningrate_scale = 1,
  output = "sigm", sae_output = "linear", numepochs = 3,
  batchsize = 100, hidden_dropout = 0, visible_dropout = 0)
```
| Argument | Description |
|---|---|
| x | matrix of x values for examples |
| y | vector or matrix of target values for examples |
| hidden | vector of numbers of units in the hidden layers. Default is c(1). |
| activationfun | activation function of the hidden units. Can be "sigm", "linear" or "tanh". Default is "sigm", the logistic function. |
| learningrate | learning rate for gradient descent. Default is 0.8. |
| momentum | momentum for gradient descent. Default is 0.5. |
| learningrate_scale | the learning rate is multiplied by this scale after every iteration. Default is 1. |
| numepochs | number of iterations over the samples. Default is 3. |
| batchsize | size of a mini-batch. Default is 100. |
| output | function of the output units; can be "sigm", "linear" or "softmax". Default is "sigm". |
| sae_output | function of the autoencoder output units; can be "sigm", "linear" or "softmax". Default is "linear". |
| hidden_dropout | dropout fraction for the hidden layers. Default is 0. |
| visible_dropout | dropout fraction for the input layer. Default is 0. |
Xiao Rong
```r
Var1 <- c(rnorm(50, 1, 0.5), rnorm(50, -0.6, 0.2))
Var2 <- c(rnorm(50, -0.8, 0.2), rnorm(50, 2, 1))
x <- matrix(c(Var1, Var2), nrow = 100, ncol = 2)
y <- c(rep(1, 50), rep(0, 50))
dnn <- sae.dnn.train(x, y, hidden = c(5, 5))

## predict by dnn
test_Var1 <- c(rnorm(50, 1, 0.5), rnorm(50, -0.6, 0.2))
test_Var2 <- c(rnorm(50, -0.8, 0.2), rnorm(50, 2, 1))
test_x <- matrix(c(test_Var1, test_Var2), nrow = 100, ncol = 2)
nn.test(dnn, test_x, y)
```
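The inputs in the example above are unbounded Gaussians, so the default linear autoencoder output fits them; for inputs rescaled to [0, 1], a sigmoid autoencoder output may be preferable. A hedged sketch; the rescaling and the choice of sae_output = "sigm" are illustrative assumptions:

```r
## continue from the example above: rescale each column of x to [0, 1]
x01 <- apply(x, 2, function(col) (col - min(col)) / (max(col) - min(col)))
dnn01 <- sae.dnn.train(x01, y, hidden = c(5, 5), sae_output = "sigm")
```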