在R中使用Keras包进行L1和L2正则化

2guxujil  于 2022-11-13  发布在  其他
关注(0)|答案(1)|浏览(179)
library(keras)
# NOTE(review): this is the ERRONEOUS version from the question, kept as-is
# on purpose. regularizer_l1_l2() constructs a regularizer *object*; it is
# not a layer function and does not accept a model as input. Inside the
# %>% chain the model is passed as the hidden first argument `.`, which
# produces exactly the error quoted below the code:
#   Error in regularizer_l1_l2(., l1 = 0.01, l2 = 0.01) : unused argument (.)
# The correct usage (see the answer) is to pass the regularizer via the
# kernel_regularizer argument of layer_dense().
# Also note: `train_data` is a free variable resolved from the calling
# environment, not a parameter of this function.
build_model <- function() {
  model <- keras_model_sequential() %>% 
    layer_dense(units = 64, activation = "relu", 
                input_shape = dim(train_data)[[2]]) %>% 
    regularizer_l1_l2(l1 = 0.01, l2 = 0.01) %>% 
    layer_dense(units = 64, activation = "relu") %>%
    regularizer_l1_l2(l1 = 0.01, l2 = 0.01) %>% 
    layer_dense(units = 1) 

  model %>% compile(
    optimizer = "rmsprop", 
    loss = "mse", 
    metrics = c("mae")
  )
}
# The error fires here, when the pipe chain in the body is evaluated.
model <- build_model()

我尝试使用R中的keras应用L1和L2正则化。然而,我得到一个错误:

Error in regularizer_l1_l2(., l1 = 0.01, l2 = 0.01) : unused argument (.)

我使用的正则化语法与链接 https://keras.rstudio.com/reference/regularizer_l1.html 中提到的语法相同。有人能告诉我我做错了什么吗?

7bsow1i6

7bsow1i61#

这将是使用R中的keras进行L1和L2正则化的正确语法:

library(keras)
# Build a two-hidden-layer regression model whose dense hidden layers
# carry an L1 + L2 weight penalty, attached the correct way: through the
# kernel_regularizer argument of layer_dense(), not as a pipe step.
# `train_data` is looked up in the calling environment, as in the question.
build_model <- function() {
  model <- keras_model_sequential()

  # keras layer functions modify the sequential model in place (and also
  # return it), so the chain below mutates `model` directly.
  model %>%
    layer_dense(
      units = 64,
      activation = "relu",
      kernel_regularizer = regularizer_l1_l2(l1 = 0.01, l2 = 0.01),
      input_shape = dim(train_data)[[2]]
    ) %>%
    layer_dense(
      units = 64,
      activation = "relu",
      kernel_regularizer = regularizer_l1_l2(l1 = 0.01, l2 = 0.01)
    ) %>%
    layer_dense(units = 1)

  # compile() also operates in place and returns the model, so — as in the
  # original — the compiled model is this function's return value.
  model %>% compile(
    optimizer = "rmsprop",
    loss = "mse",
    metrics = c("mae")
  )
}

可复现示例:

library(keras)

# Load MNIST: 28x28 grayscale digit images split into train/test sets.
mnist <- dataset_mnist()
train_images <- mnist$train$x
train_labels <- mnist$train$y
test_images <- mnist$test$x
test_labels <- mnist$test$y

# Flatten each 28x28 image into a 784-long vector and scale pixel
# values from [0, 255] to [0, 1]. 60000 / 10000 are the standard
# MNIST train / test set sizes.
train_images <- array_reshape(train_images, c(60000, 28*28))
train_images <- train_images / 255
test_images <- array_reshape(test_images, c(10000, 28*28))
test_images <- test_images / 255

# One-hot encode the 0-9 digit labels for categorical crossentropy.
train_labels <- to_categorical(train_labels)
test_labels <- to_categorical(test_labels)

# Single hidden layer with an L1 + L2 penalty on its weight matrix,
# supplied via kernel_regularizer — the correct placement, in contrast
# to piping regularizer_l1_l2() as if it were a layer.
network <- keras_model_sequential() %>%
  layer_dense(units = 512,
              activation = "relu",
              kernel_regularizer = regularizer_l1_l2(l1 = 0.001, l2 = 0.001),
              input_shape = c(28 * 28)) %>%
  layer_dense(units = 10, activation = "softmax")

network %>% compile(
  optimizer = "rmsprop",
  loss = "categorical_crossentropy",
  metrics = c("accuracy")
)

# Short training run; 5 epochs is enough to demonstrate the setup works.
network %>% fit(train_images,
                train_labels,
                epochs = 5,
                batch_size = 128)

# Held-out loss/accuracy (sample output shown below).
metrics <- network %>% evaluate(test_images, test_labels)

> metrics
#output
$`loss`
[1] 0.6863746

$acc
[1] 0.921

相关问题