From 384fb80f8944f62be73953722edb82b90b3726e1 Mon Sep 17 00:00:00 2001 From: Kentaro Matsuura Date: Wed, 2 Oct 2024 20:12:28 +0900 Subject: [PATCH] for CRAN resubmission --- DESCRIPTION | 2 +- R/adjust_significance_level.R | 23 ++++++++++++++++++----- R/learn_allocation_rule.R | 10 ++++------ R/rl_config_set.R | 5 ++--- R/rl_dnn_config.R | 5 ++--- R/simulate_one_trial.R | 32 +++++++++++++++++++++++++++----- man/adjust_significance_level.Rd | 23 ++++++++++++++++++----- man/learn_allocation_rule.Rd | 10 ++++------ man/rl_config_set.Rd | 5 ++--- man/rl_dnn_config.Rd | 5 ++--- man/simulate_one_trial.Rd | 32 +++++++++++++++++++++++++++----- 11 files changed, 107 insertions(+), 45 deletions(-) diff --git a/DESCRIPTION b/DESCRIPTION index 737c076..0cbc8fe 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,7 +1,7 @@ Package: RLoptimal Type: Package Title: Optimal Adaptive Allocation Using Deep Reinforcement Learning -Version: 1.0.0 +Version: 1.0.1 Authors@R: c( person("Kentaro", "Matsuura", , "matsuurakentaro55@gmail.com", role = c("aut", "cre", "cph"), comment = c(ORCID = "0000-0001-5262-055X")), diff --git a/R/adjust_significance_level.R b/R/adjust_significance_level.R index 6e8bec0..0fbed5c 100644 --- a/R/adjust_significance_level.R +++ b/R/adjust_significance_level.R @@ -25,18 +25,31 @@ #' @returns A positive numeric value specifying adjusted significance level. #' #' @examples -#' # We computed `allocation_rule` -#' # ... +#' library(RLoptimal) #' +#' doses <- c(0, 2, 4, 6, 8) +#' +#' models <- DoseFinding::Mods( +#' doses = doses, maxEff = 1.65, +#' linear = NULL, emax = 0.79, sigEmax = c(4, 5) +#' ) +#' +#' \dontrun{ +#' allocation_rule <- learn_allocation_rule( +#' models, +#' N_total = 150, N_ini = rep(10, 5), N_block = 10, Delta = 1.3, +#' outcome_type = "continuous", sd_normal = sqrt(4.5), +#' seed = 123, rl_config = rl_config_set(iter = 1000), +#' alpha = 0.025 +#' ) +#' #' # Simulation-based adjustment of the significance level using `allocation_rule` -#' \donttest{ #' adjusted_alpha <- adjust_significance_level( #' allocation_rule, models, #' N_total = 150, N_ini = rep(10, 5), N_block = 10, #' outcome_type = "continuous", sd_normal = sqrt(4.5), #' alpha = 0.025, n_sim = 10000, seed = 123 -#' ) -#' } +#' )} #' #' @importFrom stats quantile #' diff --git a/R/learn_allocation_rule.R b/R/learn_allocation_rule.R index 624658f..fd0f332 100644 --- a/R/learn_allocation_rule.R +++ b/R/learn_allocation_rule.R @@ -65,15 +65,14 @@ #' #' # We obtain an optimal adaptive allocation rule by executing #' # `learn_allocation_rule()` with the `models`. -#' \donttest{ +#' \dontrun{ #' allocation_rule <- learn_allocation_rule( #' models, #' N_total = 150, N_ini = rep(10, 5), N_block = 10, Delta = 1.3, #' outcome_type = "continuous", sd_normal = sqrt(4.5), #' seed = 123, rl_config = rl_config_set(iter = 1000), #' alpha = 0.025 -#' ) -#' } +#' )} #' #' # It is recommended that the models used in reinforcement learning include #' # possible models in addition to the models used in the MCPMod method. @@ -85,15 +84,14 @@ #' ) #' #' # Then, we specify the argument `rl_models` in `learn_allocation_rule` function. 
-#' \donttest{ +#' \dontrun{ #' allocation_rule <- learn_allocation_rule( #' models, #' N_total = 150, N_ini = rep(10, 5), N_block = 10, Delta = 1.3, #' outcome_type = "continuous", sd_normal = sqrt(4.5), #' seed = 123, rl_models = rl_models, rl_config = rl_config_set(iter = 1000), #' alpha = 0.025 -#' ) -#' } +#' )} #' #' @importFrom glue glue #' diff --git a/R/rl_config_set.R b/R/rl_config_set.R index 3276888..b44ce3d 100644 --- a/R/rl_config_set.R +++ b/R/rl_config_set.R @@ -27,7 +27,7 @@ #' @return A list of reinforcement learning configuration parameters #' #' @examples -#' \donttest{ +#' \dontrun{ #' allocation_rule <- learn_allocation_rule( #' models, #' N_total = 150, N_ini = rep(10, 5), N_block = 10, Delta = 1.3, @@ -36,8 +36,7 @@ #' # We change `iter` to 200 and `cores` for reinforcement learning to 2 #' rl_config = rl_config_set(iter = 200, cores = 2), #' alpha = 0.025 -#' ) -#' } +#' )} #' #' @export rl_config_set <- function(iter = 1000L, diff --git a/R/rl_dnn_config.R b/R/rl_dnn_config.R index 1ee6a2d..bcbc3bc 100644 --- a/R/rl_dnn_config.R +++ b/R/rl_dnn_config.R @@ -14,7 +14,7 @@ #' @return A list of DNN configuration parameters #' #' @examples -#' \donttest{ +#' \dontrun{ #' allocation_rule <- learn_allocation_rule( #' models, #' N_total = 150, N_ini = rep(10, 5), N_block = 10, Delta = 1.3, @@ -27,8 +27,7 @@ #' model = rl_dnn_config(fcnet_hiddens = c(512L, 512L), fcnet_activation = "tanh") #' ), #' alpha = 0.025 -#' ) -#' } +#' )} #' #' @export rl_dnn_config <- function( diff --git a/R/simulate_one_trial.R b/R/simulate_one_trial.R index 91354f7..d3b09b0 100644 --- a/R/simulate_one_trial.R +++ b/R/simulate_one_trial.R @@ -37,9 +37,31 @@ #' the estimated target dose, and the MAE. #' #' @examples -#' \donttest{ -#' # We computed `allocation_rule` and `adjusted_alpha` -#' # ... +#' library(RLoptimal) +#' +#' doses <- c(0, 2, 4, 6, 8) +#' +#' models <- DoseFinding::Mods( +#' doses = doses, maxEff = 1.65, +#' linear = NULL, emax = 0.79, sigEmax = c(4, 5) +#' ) +#' +#' \dontrun{ +#' allocation_rule <- learn_allocation_rule( +#' models, +#' N_total = 150, N_ini = rep(10, 5), N_block = 10, Delta = 1.3, +#' outcome_type = "continuous", sd_normal = sqrt(4.5), +#' seed = 123, rl_config = rl_config_set(iter = 1000), +#' alpha = 0.025 +#' ) +#' +#' # Simulation-based adjustment of the significance level using `allocation_rule` +#' adjusted_alpha <- adjust_significance_level( +#' allocation_rule, models, +#' N_total = 150, N_ini = rep(10, 5), N_block = 10, +#' outcome_type = "continuous", sd_normal = sqrt(4.5), +#' alpha = 0.025, n_sim = 10000, seed = 123 +#' )} #' #' eval_models <- DoseFinding::Mods( #' doses = doses, maxEff = 1.65, @@ -51,14 +73,14 @@ #' true_model_name <- "emax" #' #' # Simulate one trial using the obtained `allocation_rule` When the true model is "emax" +#' \dontrun{ #' res_one <- simulate_one_trial( #' allocation_rule, models, #' true_response = true_response_list[[true_model_name]], #' N_total = 150, N_ini = rep(10, 5), N_block = 10, #' Delta = 1.3, outcome_type = "continuous", sd_normal = sqrt(4.5), #' alpha = adjusted_alpha, seed = simID, eval_type = "all" -#' ) -#' } +#' )} #' #' @importFrom stats coef binomial glm plogis predict rbinom rnorm vcov #' diff --git a/man/adjust_significance_level.Rd b/man/adjust_significance_level.Rd index 99f5866..99b878c 100644 --- a/man/adjust_significance_level.Rd +++ b/man/adjust_significance_level.Rd @@ -56,17 +56,30 @@ A positive numeric value specifying adjusted significance level. 
Adjust Significance Level on a Simulation Basis } \examples{ -# We computed `allocation_rule` -# ... +library(RLoptimal) + +doses <- c(0, 2, 4, 6, 8) + +models <- DoseFinding::Mods( + doses = doses, maxEff = 1.65, + linear = NULL, emax = 0.79, sigEmax = c(4, 5) +) + +\dontrun{ +allocation_rule <- learn_allocation_rule( + models, + N_total = 150, N_ini = rep(10, 5), N_block = 10, Delta = 1.3, + outcome_type = "continuous", sd_normal = sqrt(4.5), + seed = 123, rl_config = rl_config_set(iter = 1000), + alpha = 0.025 +) # Simulation-based adjustment of the significance level using `allocation_rule` -\donttest{ adjusted_alpha <- adjust_significance_level( allocation_rule, models, N_total = 150, N_ini = rep(10, 5), N_block = 10, outcome_type = "continuous", sd_normal = sqrt(4.5), alpha = 0.025, n_sim = 10000, seed = 123 -) -} +)} } diff --git a/man/learn_allocation_rule.Rd b/man/learn_allocation_rule.Rd index 7fb32b0..0f085be 100644 --- a/man/learn_allocation_rule.Rd +++ b/man/learn_allocation_rule.Rd @@ -112,15 +112,14 @@ models <- DoseFinding::Mods( # We obtain an optimal adaptive allocation rule by executing # `learn_allocation_rule()` with the `models`. -\donttest{ +\dontrun{ allocation_rule <- learn_allocation_rule( models, N_total = 150, N_ini = rep(10, 5), N_block = 10, Delta = 1.3, outcome_type = "continuous", sd_normal = sqrt(4.5), seed = 123, rl_config = rl_config_set(iter = 1000), alpha = 0.025 -) -} +)} # It is recommended that the models used in reinforcement learning include # possible models in addition to the models used in the MCPMod method. @@ -132,14 +131,13 @@ rl_models <- DoseFinding::Mods( ) # Then, we specify the argument `rl_models` in `learn_allocation_rule` function. -\donttest{ +\dontrun{ allocation_rule <- learn_allocation_rule( models, N_total = 150, N_ini = rep(10, 5), N_block = 10, Delta = 1.3, outcome_type = "continuous", sd_normal = sqrt(4.5), seed = 123, rl_models = rl_models, rl_config = rl_config_set(iter = 1000), alpha = 0.025 -) -} +)} } diff --git a/man/rl_config_set.Rd b/man/rl_config_set.Rd index d51e410..6480733 100644 --- a/man/rl_config_set.Rd +++ b/man/rl_config_set.Rd @@ -57,7 +57,7 @@ Mainly settings for the arguments of the training() function. Not compatible with the new API stack introduced in Ray 2.10.0. } \examples{ -\donttest{ +\dontrun{ allocation_rule <- learn_allocation_rule( models, N_total = 150, N_ini = rep(10, 5), N_block = 10, Delta = 1.3, @@ -66,7 +66,6 @@ allocation_rule <- learn_allocation_rule( # We change `iter` to 200 and `cores` for reinforcement learning to 2 rl_config = rl_config_set(iter = 200, cores = 2), alpha = 0.025 -) -} +)} } diff --git a/man/rl_dnn_config.Rd b/man/rl_dnn_config.Rd index 06aade3..3b2d7b0 100644 --- a/man/rl_dnn_config.Rd +++ b/man/rl_dnn_config.Rd @@ -29,7 +29,7 @@ DNN (deep neural network) configuration for reinforcement learning. For detail, see Section 3.2.6 of the original paper. } \examples{ -\donttest{ +\dontrun{ allocation_rule <- learn_allocation_rule( models, N_total = 150, N_ini = rep(10, 5), N_block = 10, Delta = 1.3, @@ -42,7 +42,6 @@ allocation_rule <- learn_allocation_rule( model = rl_dnn_config(fcnet_hiddens = c(512L, 512L), fcnet_activation = "tanh") ), alpha = 0.025 -) -} +)} } diff --git a/man/simulate_one_trial.Rd b/man/simulate_one_trial.Rd index 4c2dd9a..f042d76 100644 --- a/man/simulate_one_trial.Rd +++ b/man/simulate_one_trial.Rd @@ -74,9 +74,31 @@ the estimated target dose, and the MAE. 
Simulate One Trial Using an Obtained Optimal Adaptive Allocation Rule } \examples{ -\donttest{ -# We computed `allocation_rule` and `adjusted_alpha` -# ... +library(RLoptimal) + +doses <- c(0, 2, 4, 6, 8) + +models <- DoseFinding::Mods( + doses = doses, maxEff = 1.65, + linear = NULL, emax = 0.79, sigEmax = c(4, 5) +) + +\dontrun{ +allocation_rule <- learn_allocation_rule( + models, + N_total = 150, N_ini = rep(10, 5), N_block = 10, Delta = 1.3, + outcome_type = "continuous", sd_normal = sqrt(4.5), + seed = 123, rl_config = rl_config_set(iter = 1000), + alpha = 0.025 +) + +# Simulation-based adjustment of the significance level using `allocation_rule` +adjusted_alpha <- adjust_significance_level( + allocation_rule, models, + N_total = 150, N_ini = rep(10, 5), N_block = 10, + outcome_type = "continuous", sd_normal = sqrt(4.5), + alpha = 0.025, n_sim = 10000, seed = 123 +)} eval_models <- DoseFinding::Mods( doses = doses, maxEff = 1.65, @@ -88,13 +110,13 @@ true_response_list <- as.list(data.frame(true_response_matrix, check.names = FAL true_model_name <- "emax" # Simulate one trial using the obtained `allocation_rule` When the true model is "emax" +\dontrun{ res_one <- simulate_one_trial( allocation_rule, models, true_response = true_response_list[[true_model_name]], N_total = 150, N_ini = rep(10, 5), N_block = 10, Delta = 1.3, outcome_type = "continuous", sd_normal = sqrt(4.5), alpha = adjusted_alpha, seed = simID, eval_type = "all" -) -} +)} }
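
Note on the change from \donttest{} to \dontrun{}: code wrapped in \donttest{} is still executed by CRAN's checks (R CMD check --as-cran runs \donttest{} examples), whereas code wrapped in \dontrun{} is never executed automatically. Switching the reinforcement-learning examples to \dontrun{} therefore keeps them out of CRAN's example runs entirely, presumably because they are long-running and need a configured Ray backend (that motivation is an assumption; the patch does not state it), while the expanded preambles (doses, models) make each example block copy-paste runnable on its own. Below is a minimal sketch, not part of the patch, of how the wrapped examples can still be exercised locally once the package and its backend are installed:

# Run the \dontrun{} blocks from the updated help pages; example() skips
# \dontrun{} code unless run.dontrun = TRUE is requested explicitly.
library(RLoptimal)
example("learn_allocation_rule",     package = "RLoptimal", run.dontrun = TRUE)
example("adjust_significance_level", package = "RLoptimal", run.dontrun = TRUE)

# The simulate_one_trial() example refers to a simulation index `simID`;
# with example()'s default local = FALSE the code is evaluated in the
# workspace, so defining simID there first lets that example run too.
simID <- 1
example("simulate_one_trial", package = "RLoptimal", run.dontrun = TRUE)

The same blocks can also be exercised during package checking with R CMD check --run-dontrun.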