
xgboost: Survival Analysis, Accelerated Failure Time (AFT)

library(mlexperiments)
library(mlsurvlrnrs)

See https://github.com/kapsner/mlsurvlrnrs/blob/main/R/learner_surv_xgboost_aft.R for implementation details.
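
Internally, xgboost's "survival:aft" objective does not consume a survival::Surv object directly; the censored outcome has to be encoded as interval bounds on the label. The following self-contained sketch with toy data (an illustration of xgboost's documented AFT interface, not the wrapper's actual code; all object names are hypothetical) shows the mechanism the learner abstracts away:

# illustrative sketch of xgboost's AFT interface: an event contributes a
# degenerate interval [t, t], a right-censored observation contributes
# [t, Inf)
x_toy <- matrix(rnorm(20), ncol = 2)
time_toy <- rexp(10, rate = 0.1)
event_toy <- rbinom(10, 1, 0.7)
dtrain <- xgboost::xgb.DMatrix(x_toy)
xgboost::setinfo(dtrain, "label_lower_bound", time_toy)
xgboost::setinfo(
  dtrain, "label_upper_bound",
  ifelse(event_toy == 1, time_toy, Inf)
)
aft_model <- xgboost::xgb.train(
  params = list(objective = "survival:aft", eval_metric = "aft-nloglik"),
  data = dtrain,
  nrounds = 10L
)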

Preprocessing

Import and Prepare Data

dataset <- survival::colon |>
  data.table::as.data.table() |>
  na.omit()
# keep only the death events (etype == 2; etype == 1 codes recurrence)
dataset <- dataset[get("etype") == 2, ]

surv_cols <- c("status", "time", "rx")
# all columns except id, study, and etype; status and time are removed
# from the feature set further below
feature_cols <- colnames(dataset)[3:(ncol(dataset) - 1)]

General Configurations

seed <- 123
if (isTRUE(as.logical(Sys.getenv("_R_CHECK_LIMIT_CORES_")))) {
  # on CRAN, respect the two-core limit
  ncores <- 2L
} else {
  ncores <- ifelse(
    test = parallel::detectCores() > 4,
    yes = 4L,
    no = ifelse(
      test = parallel::detectCores() < 2L,
      yes = 1L,
      no = parallel::detectCores()
    )
  )
}
options("mlexperiments.bayesian.max_init" = 10L)
options("mlexperiments.optim.xgb.nrounds" = 100L)
options("mlexperiments.optim.xgb.early_stopping_rounds" = 10L)

Generate Training and Test Data

split_vector <- splitTools::multi_strata(
  df = dataset[, .SD, .SDcols = surv_cols],
  strategy = "kmeans",
  k = 4
)

data_split <- splitTools::partition(
  y = split_vector,
  p = c(train = 0.7, test = 0.3),
  type = "stratified",
  seed = seed
)
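
splitTools::partition returns a list of row indices, one element per partition. A quick sanity check of the resulting split sizes (not part of the original vignette) could look like this:

# inspect how many observations ended up in each partition
sapply(data_split, length)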

# one-hot encode the features (no intercept) into the numeric matrix
# format that xgboost expects
train_x <- model.matrix(
  ~ -1 + .,
  dataset[
    data_split$train,
    .SD,
    .SDcols = setdiff(feature_cols, surv_cols[1:2])
  ]
)
train_y <- survival::Surv(
  event = (dataset[data_split$train, get("status")] |>
    as.character() |>
    as.integer()),
  time = dataset[data_split$train, get("time")],
  type = "right"
)
split_vector_train <- splitTools::multi_strata(
  df = dataset[data_split$train, .SD, .SDcols = surv_cols],
  strategy = "kmeans",
  k = 4
)


test_x <- model.matrix(
  ~ -1 + .,
  dataset[data_split$test, .SD, .SDcols = setdiff(feature_cols, surv_cols[1:2])]
)
test_y <- survival::Surv(
  event = (dataset[data_split$test, get("status")] |>
    as.character() |>
    as.integer()),
  time = dataset[data_split$test, get("time")],
  type = "right"
)

Generate Training Data Folds

fold_list <- splitTools::create_folds(
  y = split_vector_train,
  k = 3,
  type = "stratified",
  seed = seed
)
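
With type = "stratified", splitTools::create_folds returns, for each fold, the row indices used for training. The fold sizes can be checked in the same way as the partition above (illustrative):

# each element holds the in-sample (training) indices of one fold
sapply(fold_list, length)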

Experiments

Prepare Experiments

# required learner arguments, not optimized
learner_args <- list(
  objective = "survival:aft",
  eval_metric = "aft-nloglik"
)
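
xgboost's AFT objective also recognizes the parameters aft_loss_distribution (one of "normal", "logistic", or "extreme"; default "normal") and aft_loss_distribution_scale (default 1.0). If the defaults are not desired, they could presumably be included in learner_args in the same way, for example:

# hypothetical extension of learner_args with xgboost's AFT-specific
# distribution parameters (shown here with xgboost's default values)
learner_args_ext <- c(
  learner_args,
  list(
    aft_loss_distribution = "normal",
    aft_loss_distribution_scale = 1.0
  )
)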

# set arguments for predict function and performance metric,
# required for mlexperiments::MLCrossValidation and
# mlexperiments::MLNestedCV
predict_args <- NULL
performance_metric <- c_index
performance_metric_args <- NULL
return_models <- FALSE
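
The c_index function from mlsurvlrnrs computes the concordance index, i.e., the probability that the model orders a randomly chosen pair of comparable subjects correctly with respect to their survival times. Conceptually, the same quantity can be derived with the survival package; a minimal sketch, assuming a Surv outcome y and a numeric prediction vector preds:

# minimal sketch of a concordance computation (illustrative only; the
# vignette itself uses mlsurvlrnrs::c_index as performance metric)
concordance_sketch <- function(y, preds) {
  survival::concordance(y ~ preds)$concordance
}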

# required for grid search and initialization of bayesian optimization
parameter_grid <- expand.grid(
  subsample = seq(0.6, 1, .2),
  colsample_bytree = seq(0.6, 1, .2),
  min_child_weight = seq(1, 5, 4),
  learning_rate = seq(0.1, 0.2, 0.1),
  max_depth = seq(1, 5, 4)
)
# reduce to a maximum of 10 rows
if (nrow(parameter_grid) > 10) {
  set.seed(123)
  sample_rows <- sample(seq_len(nrow(parameter_grid)), 10, FALSE)
  parameter_grid <- kdry::mlh_subset(parameter_grid, sample_rows)
}

# required for bayesian optimization
parameter_bounds <- list(
  subsample = c(0.2, 1),
  colsample_bytree = c(0.2, 1),
  min_child_weight = c(1L, 10L),
  learning_rate = c(0.1, 0.2),
  max_depth = c(1L, 10L)
)
optim_args <- list(
  iters.n = ncores,
  kappa = 3.5,
  acq = "ucb"
)
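
With acq = "ucb", candidate settings are scored by the upper confidence bound acquisition function, mu(x) + kappa * sigma(x), where mu and sigma are the posterior mean and standard deviation of the surrogate model; the kappa of 3.5 used here therefore leans towards exploring regions of high uncertainty rather than exploiting the current optimum.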

Hyperparameter Tuning

tuner <- mlexperiments::MLTuneParameters$new(
  learner = LearnerSurvXgboostAft$new(
    metric_optimization_higher_better = FALSE
  ),
  strategy = "grid",
  ncores = ncores,
  seed = seed
)

tuner$parameter_grid <- parameter_grid
tuner$learner_args <- learner_args
tuner$split_type <- "stratified"
tuner$split_vector <- split_vector_train

tuner$set_data(
  x = train_x,
  y = train_y
)

tuner_results_grid <- tuner$execute(k = 3)
#>
#> Parameter settings [==========================================================================================================================================] 10/10 (100%)

head(tuner_results_grid)
#>    setting_id metric_optim_mean  nrounds subsample colsample_bytree min_child_weight learning_rate max_depth    objective eval_metric
#>         <int>             <num>    <num>     <num>            <num>            <num>         <num>     <num>       <char>      <char>
#> 1:          1          4.350527 30.33333       0.6              0.8                5           0.2         1 survival:aft aft-nloglik
#> 2:          2          4.384429 40.00000       1.0              0.8                5           0.1         5 survival:aft aft-nloglik
#> 3:          3          4.351162 65.66667       0.8              0.8                5           0.1         1 survival:aft aft-nloglik
#> 4:          4          4.408632 19.66667       0.6              0.8                5           0.2         5 survival:aft aft-nloglik
#> 5:          5          4.397901 40.66667       1.0              0.8                1           0.1         5 survival:aft aft-nloglik
#> 6:          6          4.380873 42.66667       0.8              0.8                5           0.1         5 survival:aft aft-nloglik
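
The winning configuration is stored in the tuner's results; it is this setting that the cross-validation below picks up, after dropping its first element, the setting_id. For inspection (best_setting is a hypothetical name):

# the best hyperparameter setting found during tuning; element 1 is the
# setting_id, which is removed before reuse as learner arguments
best_setting <- tuner$results$best.setting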

Bayesian Optimization

tuner <- mlexperiments::MLTuneParameters$new(
  learner = LearnerSurvXgboostAft$new(
    metric_optimization_higher_better = FALSE
  ),
  strategy = "bayesian",
  ncores = ncores,
  seed = seed
)

tuner$parameter_grid <- parameter_grid
tuner$parameter_bounds <- parameter_bounds

tuner$learner_args <- learner_args
tuner$optim_args <- optim_args

tuner$split_type <- "stratified"
tuner$split_vector <- split_vector_train

tuner$set_data(
  x = train_x,
  y = train_y
)

tuner_results_bayesian <- tuner$execute(k = 3)
#>
#> Registering parallel backend using 4 cores.

head(tuner_results_bayesian)
#>    Epoch setting_id subsample colsample_bytree min_child_weight learning_rate max_depth gpUtility acqOptimum inBounds Elapsed
#>    <num>      <int>     <num>            <num>            <num>         <num>     <num>     <num>     <lgcl>   <lgcl>   <num>
#> 1:     0          1       0.6              0.8                5           0.2         1        NA      FALSE     TRUE   1.726
#> 2:     0          2       1.0              0.8                5           0.1         5        NA      FALSE     TRUE   1.805
#> 3:     0          3       0.8              0.8                5           0.1         1        NA      FALSE     TRUE   1.817
#> 4:     0          4       0.6              0.8                5           0.2         5        NA      FALSE     TRUE   1.774
#> 5:     0          5       1.0              0.8                1           0.1         5        NA      FALSE     TRUE   0.925
#> 6:     0          6       0.8              0.8                5           0.1         5        NA      FALSE     TRUE   0.896
#>        Score metric_optim_mean  nrounds errorMessage    objective eval_metric
#>        <num>             <num>    <num>       <lgcl>       <char>      <char>
#> 1: -4.345650          4.345650 27.66667           NA survival:aft aft-nloglik
#> 2: -4.378685          4.378685 42.66667           NA survival:aft aft-nloglik
#> 3: -4.349229          4.349229 49.00000           NA survival:aft aft-nloglik
#> 4: -4.413895          4.413895 21.33333           NA survival:aft aft-nloglik
#> 5: -4.400767          4.400767 40.66667           NA survival:aft aft-nloglik
#> 6: -4.359158          4.359158 40.33333           NA survival:aft aft-nloglik

k-Fold Cross Validation

validator <- mlexperiments::MLCrossValidation$new(
  learner = LearnerSurvXgboostAft$new(
    metric_optimization_higher_better = FALSE
  ),
  fold_list = fold_list,
  ncores = ncores,
  seed = seed
)

validator$learner_args <- tuner$results$best.setting[-1]
validator$learner_args$nrounds <- floor(validator$learner_args$nrounds)
validator$predict_args <- predict_args
validator$performance_metric <- performance_metric
validator$performance_metric_args <- performance_metric_args
validator$return_models <- return_models

validator$set_data(
  x = train_x,
  y = train_y
)
validator_results <- validator$execute()
#>
#> CV fold: Fold1
#>
#> CV fold: Fold2
#>
#> CV fold: Fold3

head(validator_results)
#>      fold performance subsample colsample_bytree min_child_weight learning_rate max_depth nrounds    objective eval_metric
#>    <char>       <num>     <num>            <num>            <num>         <num>     <num>   <num>       <char>      <char>
#> 1:  Fold1   0.3952192       0.6              0.8                5           0.2         1      27 survival:aft aft-nloglik
#> 2:  Fold2   0.3510979       0.6              0.8                5           0.2         1      27 survival:aft aft-nloglik
#> 3:  Fold3   0.3043296       0.6              0.8                5           0.2         1      27 survival:aft aft-nloglik
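
The fold-wise c-index values can be aggregated into a single cross-validated performance estimate, for instance by simple averaging (an illustrative step, not part of the original vignette):

# mean concordance across the three cross-validation folds
mean(validator_results$performance)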

Nested Cross Validation

validator <- mlexperiments::MLNestedCV$new(
  learner = LearnerSurvXgboostAft$new(
    metric_optimization_higher_better = FALSE
  ),
  strategy = "grid",
  fold_list = fold_list,
  k_tuning = 3L,
  ncores = ncores,
  seed = seed
)

validator$parameter_grid <- parameter_grid
validator$learner_args <- learner_args
validator$split_type <- "stratified"
validator$split_vector <- split_vector_train

validator$predict_args <- predict_args
validator$performance_metric <- performance_metric
validator$performance_metric_args <- performance_metric_args
validator$return_models <- return_models

validator$set_data(
  x = train_x,
  y = train_y
)

validator_results <- validator$execute()
#>
#> CV fold: Fold1
#>
#> Parameter settings [==========================================================================================================================================] 10/10 (100%)
#> CV fold: Fold2
#> CV progress [=================================================================================================>-------------------------------------------------] 2/3 ( 67%)
#>
#> Parameter settings [==========================================================================================================================================] 10/10 (100%)
#> CV fold: Fold3
#> CV progress [===================================================================================================================================================] 3/3 (100%)
#>
#> Parameter settings [==========================================================================================================================================] 10/10 (100%)

head(validator_results)
#>      fold performance  nrounds subsample colsample_bytree min_child_weight learning_rate max_depth    objective eval_metric
#>    <char>       <num>    <num>     <num>            <num>            <num>         <num>     <num>       <char>      <char>
#> 1:  Fold1   0.3940442 32.66667       0.6              0.8                5           0.2         1 survival:aft aft-nloglik
#> 2:  Fold2   0.3409029 29.66667       0.6              0.8                5           0.2         1 survival:aft aft-nloglik
#> 3:  Fold3   0.3188261 54.33333       0.8              0.8                5           0.1         1 survival:aft aft-nloglik
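
In contrast to the plain k-fold cross-validation above, each outer fold can end up with its own hyperparameter setting, because tuning is repeated within every outer training set; here, Fold3 selected a different subsample and learning_rate than Fold1 and Fold2. The spread of the fold-wise scores can be inspected as before (illustrative):

# distribution of the nested cross-validation performance estimates
summary(validator_results$performance)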

Inner Bayesian Optimization

validator <- mlexperiments::MLNestedCV$new(
  learner = LearnerSurvXgboostAft$new(
    metric_optimization_higher_better = FALSE
  ),
  strategy = "bayesian",
  fold_list = fold_list,
  k_tuning = 3L,
  ncores = ncores,
  seed = 312
)

validator$parameter_grid <- parameter_grid
validator$learner_args <- learner_args
validator$split_type <- "stratified"
validator$split_vector <- split_vector_train


validator$parameter_bounds <- parameter_bounds
validator$optim_args <- optim_args

validator$predict_args <- predict_args
validator$performance_metric <- performance_metric
validator$performance_metric_args <- performance_metric_args
validator$return_models <- TRUE

validator$set_data(
  x = train_x,
  y = train_y
)

validator_results <- validator$execute()
#>
#> CV fold: Fold1
#>
#> Registering parallel backend using 4 cores.
#>
#> CV fold: Fold2
#> CV progress [=================================================================================================>-------------------------------------------------] 2/3 ( 67%)
#>
#> Registering parallel backend using 4 cores.
#>
#> CV fold: Fold3
#> CV progress [===================================================================================================================================================] 3/3 (100%)
#>
#> Registering parallel backend using 4 cores.

head(validator_results)
#>      fold performance subsample colsample_bytree min_child_weight learning_rate max_depth  nrounds
#>    <char>       <num>     <num>            <num>            <num>         <num>     <num>    <num>
#> 1:  Fold1   0.3778285 0.2000000        0.8372285                1     0.1290960         1 46.66667
#> 2:  Fold2   0.3532460 0.7222891        0.9914938                1     0.1085742         1 61.00000
#> 3:  Fold3   0.3159590 0.6000000        1.0000000                1     0.2000000         1 28.66667
#>       objective eval_metric
#>          <char>      <char>
#> 1: survival:aft aft-nloglik
#> 2: survival:aft aft-nloglik
#> 3: survival:aft aft-nloglik

Holdout Test Dataset Performance

Predict Outcome in Holdout Test Dataset

preds_xgboost <- mlexperiments::predictions(
  object = validator,
  newdata = test_x
)
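
Because each outer fold contributed its own model, mlexperiments::predictions should yield one prediction column per fold, which is also why the performance table below reports one row per fold. If a single prediction per holdout observation is wanted, the fold columns could be averaged; a sketch under that assumption about the output layout (not part of the original vignette):

# hypothetical ensembling step: average the per-fold predictions,
# assuming one numeric column per cross-validation fold
ensemble_preds <- rowMeans(as.matrix(preds_xgboost))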

Evaluate Performance on Holdout Test Dataset

perf_xgboost <- mlexperiments::performance(
  object = validator,
  prediction_results = preds_xgboost,
  y_ground_truth = test_y
)
perf_xgboost
#>     model performance
#>    <char>       <num>
#> 1:  Fold1   0.3395226
#> 2:  Fold2   0.3495002
#> 3:  Fold3   0.3455662
