Setup

rm(list=ls(all=TRUE))
setwd('C:/Users/sitdo/Documents/GitHub/IBD-EDA/paper1/')

Loading Data

library(dplyr)

Attaching package: ‘dplyr’

The following objects are masked from ‘package:stats’:

    filter, lag

The following objects are masked from ‘package:base’:

    intersect, setdiff, setequal, union
data <- read.csv("./data_preprocessed/data.csv") %>% select(-1)

Loading Packages

library(lightgbm)
Registered S3 method overwritten by 'data.table':
  method           from
  print.data.table     

Attaching package: ‘lightgbm’

The following object is masked from ‘package:dplyr’:

    slice

Method I: Splitting Data

set.seed(123)
splitting_ratio <- 0.7

indices <- 1:nrow(data)
shuffled_indices <- sample(indices) 
train_size <- floor(splitting_ratio * length(indices))

train_indices <- shuffled_indices[1:train_size]
test_indices <- shuffled_indices[(train_size + 1):length(indices)]

train_data <- data[train_indices, ]
test_data <- data[test_indices, ]
train_X <- as.matrix(train_data[, -1])
train_y <- train_data[, 1]
dtrain <- lgb.Dataset(data = train_X, label = train_y)

test_X <- as.matrix(test_data[, -1])
test_y <- test_data[, 1]
dtest <- lgb.Dataset(data = test_X, label = test_y)
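
The training log further down shows the outcome is imbalanced (roughly 16% positive), so a stratified split that samples 70% within each class can be worth comparing against the single random shuffle above. A minimal sketch, where stratified_split is a hypothetical helper and not part of the original analysis:

set.seed(123)
# Hypothetical helper: sample the splitting ratio within each outcome class so
# the train/test sets keep the same class balance as the full data.
stratified_split <- function(y, ratio = 0.7) {
  idx_by_class <- split(seq_along(y), y)
  sort(unlist(lapply(idx_by_class, function(idx) {
    sample(idx, size = floor(ratio * length(idx)))
  })))
}
train_idx_strat <- stratified_split(data[, 1], splitting_ratio)
test_idx_strat  <- setdiff(seq_len(nrow(data)), train_idx_strat)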

Building Model

params <- list(
  objective = "binary",
  metric = "binary_logloss",
  num_iterations = 10
)

lgb_model <- lgb.train(params, data = dtrain)
[LightGBM] [Info] Number of positive: 268, number of negative: 1423
[LightGBM] [Info] Auto-choosing row-wise multi-threading, the overhead of testing was 0.007958 seconds.
You can set `force_row_wise=true` to remove the overhead.
And if memory is not enough, you can set `force_col_wise=true`.
[LightGBM] [Info] Total Bins 297
[LightGBM] [Info] Number of data points in the train set: 1691, number of used features: 113
[LightGBM] [Info] [binary:BoostFromScore]: pavg=0.158486 -> initscore=-1.669536
[LightGBM] [Info] Start training from score -1.669536
predictions <- predict(lgb_model, test_X)
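
The dtest object built earlier is not used during training. A short sketch, assuming lgb.train's valids argument (a named list of lgb.Dataset objects), that monitors the held-out binary_logloss at each of the 10 iterations:

# Sketch: retrain with the held-out set registered as a validation set so the
# evaluation metric is reported per iteration; the fitted model is otherwise
# the same as above.
lgb_model_monitored <- lgb.train(
  params,
  data = dtrain,
  valids = list(test = dtest)
)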

Performance

Confusion Matrix

# Rows of the table are the actual labels (0, 1) and columns the predicted
# labels; with this positional indexing, dod = 0 is treated as the "positive"
# class in the metrics below.
confusion_matrix <- table(
  as.numeric(test_y), 
  as.numeric(ifelse(predictions > 0.5, 1, 0))
)

TP <- confusion_matrix[1, 1]
TN <- confusion_matrix[2, 2]
FP <- confusion_matrix[2, 1]
FN <- confusion_matrix[1, 2]

## Calculate Accuracy
accuracy <- (TP + TN) / (TP + FP + TN + FN)
cat("Accuracy:", accuracy, "\n")
Accuracy: 0.8567493 
## Calculate Recall
recall <- TP / (TP + FN)
cat("Recall:", recall, "\n")
Recall: 0.9869707 
## Calculate Precision
precision <- TP / (TP + FP)
cat("Precision:", precision, "\n")
Precision: 0.8632479 
## Calculate Specificity
specificity <- TN / (TN + FP)
cat("Specificity:", specificity, "\n")
Specificity: 0.1428571 
## Calculate F1 Score
f1_score <- 2 * (precision * recall) / (precision + recall)
cat("F1 Score:", f1_score, "\n")
F1 Score: 0.9209726 
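
As noted above, the positional indexing treats dod = 0 as the positive class. A minimal sketch (pred_class, cm, TP1, and the other names are hypothetical) that recomputes the metrics with dod = 1 as the positive class by indexing the table by label names:

pred_class <- factor(ifelse(predictions > 0.5, 1, 0), levels = c(0, 1))
cm <- table(actual = factor(test_y, levels = c(0, 1)), predicted = pred_class)
# Index by label name so the positive class is explicit.
TP1 <- cm["1", "1"]; TN1 <- cm["0", "0"]
FP1 <- cm["0", "1"]; FN1 <- cm["1", "0"]
cat("Recall (dod = 1):", TP1 / (TP1 + FN1), "\n")
cat("Precision (dod = 1):", TP1 / (TP1 + FP1), "\n")
cat("Specificity (dod = 1):", TN1 / (TN1 + FP1), "\n")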

ROC Curve

library(pROC)
Type 'citation("pROC")' for a citation.

Attaching package: ‘pROC’

The following objects are masked from ‘package:stats’:

    cov, smooth, var
# Calculate ROC curve using the actual values and predictions
roc_obj <- roc(
  as.numeric(test_data$dod), predictions
)
Setting levels: control = 0, case = 1
Setting direction: controls < cases
# Plot the ROC curve
plot(
  roc_obj,
  col = "blue",
  main = "ROC Curve - LightGBM",
  legacy.axes = TRUE,
  print.auc = TRUE,
  print.thres = TRUE,
  grid = c(0.2, 0.2),
  grid.col = c("green", "orange")
)
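
For reporting, the AUC printed on the plot can also be extracted numerically; a short sketch assuming pROC's auc() and ci.auc() helpers:

auc(roc_obj)     # numeric AUC for the held-out set
ci.auc(roc_obj)  # DeLong confidence interval for the AUC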

Method II: Cross Validation

# Perform 10-fold cross-validation
# Note: cut() over the row sequence assigns contiguous, unshuffled blocks of
# rows to folds, so fold membership follows the original row order.
num_folds <- 10
folds <- cut(seq(1, nrow(data)), breaks = num_folds, labels = FALSE)

# Create empty vectors to store the predictions and actual values
all_predictions <- vector()
all_actuals <- vector()

for (i in 1:num_folds) {
  # Split the data into training and test sets for the current fold
  train_data <- data[folds != i, ]
  test_data <- data[folds == i, ]
  
  train_X <- as.matrix(train_data[, -1])
  train_y <- train_data[, 1]
  dtrain <- lgb.Dataset(data = train_X, label = train_y)
  
  test_X <- as.matrix(test_data[, -1])
  test_y <- test_data[, 1]
  dtest <- lgb.Dataset(data = test_X, label = test_y)
  
  # Define the parameters for the LightGBM model
  params <- list(
    objective = "binary",
    metric = "binary_logloss",
    num_iterations = 10
  )
  
  # Train the LightGBM model
  lgb_model <- lgb.train(params, data = dtrain)
  
  # Make predictions on the test set
  predictions <- predict(lgb_model, test_X)
  
  # Append the predictions and actual values to the vectors
  all_predictions <- c(all_predictions, predictions)
  all_actuals <- c(all_actuals, test_y)
}
[LightGBM] [Info] Number of positive: 336, number of negative: 1839
[LightGBM] [Info] Auto-choosing row-wise multi-threading, the overhead of testing was 0.008124 seconds.
You can set `force_row_wise=true` to remove the overhead.
And if memory is not enough, you can set `force_col_wise=true`.
[LightGBM] [Info] Total Bins 297
[LightGBM] [Info] Number of data points in the train set: 2175, number of used features: 113
[LightGBM] [Info] [binary:BoostFromScore]: pavg=0.154483 -> initscore=-1.699866
[LightGBM] [Info] Start training from score -1.699866
[LightGBM] [Info] Number of positive: 343, number of negative: 1832
[LightGBM] [Info] Auto-choosing row-wise multi-threading, the overhead of testing was 0.004998 seconds.
You can set `force_row_wise=true` to remove the overhead.
And if memory is not enough, you can set `force_col_wise=true`.
[LightGBM] [Info] Total Bins 297
[LightGBM] [Info] Number of data points in the train set: 2175, number of used features: 113
[LightGBM] [Info] [binary:BoostFromScore]: pavg=0.157701 -> initscore=-1.675433
[LightGBM] [Info] Start training from score -1.675433
[LightGBM] [Info] Number of positive: 341, number of negative: 1835
[LightGBM] [Info] Auto-choosing row-wise multi-threading, the overhead of testing was 0.004961 seconds.
You can set `force_row_wise=true` to remove the overhead.
And if memory is not enough, you can set `force_col_wise=true`.
[LightGBM] [Info] Total Bins 297
[LightGBM] [Info] Number of data points in the train set: 2176, number of used features: 113
[LightGBM] [Info] [binary:BoostFromScore]: pavg=0.156710 -> initscore=-1.682917
[LightGBM] [Info] Start training from score -1.682917
[LightGBM] [Info] Number of positive: 344, number of negative: 1831
[LightGBM] [Info] Auto-choosing row-wise multi-threading, the overhead of testing was 0.004929 seconds.
You can set `force_row_wise=true` to remove the overhead.
And if memory is not enough, you can set `force_col_wise=true`.
[LightGBM] [Info] Total Bins 297
[LightGBM] [Info] Number of data points in the train set: 2175, number of used features: 113
[LightGBM] [Info] [binary:BoostFromScore]: pavg=0.158161 -> initscore=-1.671976
[LightGBM] [Info] Start training from score -1.671976
[LightGBM] [Info] Number of positive: 341, number of negative: 1834
[LightGBM] [Info] Auto-choosing row-wise multi-threading, the overhead of testing was 0.004689 seconds.
You can set `force_row_wise=true` to remove the overhead.
And if memory is not enough, you can set `force_col_wise=true`.
[LightGBM] [Info] Total Bins 297
[LightGBM] [Info] Number of data points in the train set: 2175, number of used features: 113
[LightGBM] [Info] [binary:BoostFromScore]: pavg=0.156782 -> initscore=-1.682372
[LightGBM] [Info] Start training from score -1.682372
[LightGBM] [Info] Number of positive: 332, number of negative: 1844
[LightGBM] [Info] Auto-choosing row-wise multi-threading, the overhead of testing was 0.004929 seconds.
You can set `force_row_wise=true` to remove the overhead.
And if memory is not enough, you can set `force_col_wise=true`.
[LightGBM] [Info] Total Bins 297
[LightGBM] [Info] Number of data points in the train set: 2176, number of used features: 113
[LightGBM] [Info] [binary:BoostFromScore]: pavg=0.152574 -> initscore=-1.714557
[LightGBM] [Info] Start training from score -1.714557
[LightGBM] [Info] Number of positive: 346, number of negative: 1829
[LightGBM] [Info] Auto-choosing row-wise multi-threading, the overhead of testing was 0.005653 seconds.
You can set `force_row_wise=true` to remove the overhead.
And if memory is not enough, you can set `force_col_wise=true`.
[LightGBM] [Info] Total Bins 297
[LightGBM] [Info] Number of data points in the train set: 2175, number of used features: 113
[LightGBM] [Info] [binary:BoostFromScore]: pavg=0.159080 -> initscore=-1.665086
[LightGBM] [Info] Start training from score -1.665086
[LightGBM] [Info] Number of positive: 348, number of negative: 1828
[LightGBM] [Info] Auto-choosing row-wise multi-threading, the overhead of testing was 0.004666 seconds.
You can set `force_row_wise=true` to remove the overhead.
And if memory is not enough, you can set `force_col_wise=true`.
[LightGBM] [Info] Total Bins 296
[LightGBM] [Info] Number of data points in the train set: 2176, number of used features: 113
[LightGBM] [Info] [binary:BoostFromScore]: pavg=0.159926 -> initscore=-1.658775
[LightGBM] [Info] Start training from score -1.658775
[LightGBM] [Info] Number of positive: 346, number of negative: 1829
[LightGBM] [Info] Auto-choosing row-wise multi-threading, the overhead of testing was 0.004784 seconds.
You can set `force_row_wise=true` to remove the overhead.
And if memory is not enough, you can set `force_col_wise=true`.
[LightGBM] [Info] Total Bins 297
[LightGBM] [Info] Number of data points in the train set: 2175, number of used features: 113
[LightGBM] [Info] [binary:BoostFromScore]: pavg=0.159080 -> initscore=-1.665086
[LightGBM] [Info] Start training from score -1.665086
[LightGBM] [Info] Number of positive: 343, number of negative: 1832
[LightGBM] [Info] Auto-choosing row-wise multi-threading, the overhead of testing was 0.006509 seconds.
You can set `force_row_wise=true` to remove the overhead.
And if memory is not enough, you can set `force_col_wise=true`.
[LightGBM] [Info] Total Bins 297
[LightGBM] [Info] Number of data points in the train set: 2175, number of used features: 113
[LightGBM] [Info] [binary:BoostFromScore]: pavg=0.157701 -> initscore=-1.675433
[LightGBM] [Info] Start training from score -1.675433
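
The manual loop above can also be expressed with the package's own cross-validation routine. A minimal sketch, assuming lgb.cv accepts the same params list together with data, nrounds, and nfold; fold-level binary_logloss is recorded in the returned CVBooster object:

# Sketch: built-in 10-fold cross-validation with the same parameters as the
# manual loop; lgb.cv stratifies folds on the label by default.
full_X <- as.matrix(data[, -1])
full_y <- data[, 1]
cv_booster <- lgb.cv(
  params = params,
  data = lgb.Dataset(data = full_X, label = full_y),
  nrounds = 10,
  nfold = 10
)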

Performance

Confusion Matrix

# Same orientation as in Method I: rows are actual labels, columns are
# predictions, and dod = 0 is again treated as the "positive" class.
confusion_matrix <- table(
  as.numeric(all_actuals), 
  as.numeric(ifelse(all_predictions > 0.5, 1, 0))
)

TP <- confusion_matrix[1, 1]
TN <- confusion_matrix[2, 2]
FP <- confusion_matrix[2, 1]
FN <- confusion_matrix[1, 2]

## Calculate Accuracy
accuracy <- (TP + TN) / (TP + FP + TN + FN)
cat("Accuracy:", accuracy, "\n")
Accuracy: 0.8568473 
## Calculate Recall
recall <- TP / (TP + FN)
cat("Recall:", recall, "\n")
Recall: 0.9842906 
## Calculate Precision
precision <- TP / (TP + FP)
cat("Precision:", precision, "\n")
Precision: 0.8645968 
## Calculate Specificity
specificity <- TN / (TN + FP)
cat("Specificity:", specificity, "\n")
Specificity: 0.1736842 
## Calculate F1 Score
f1_score <- 2 * (precision * recall) / (precision + recall)
cat("F1 Score:", f1_score, "\n")
F1 Score: 0.9205693 

ROC Curve

# Calculate ROC curve using the actual values and predictions
roc_obj <- roc(
  as.numeric(all_actuals), all_predictions
)
Setting levels: control = 0, case = 1
Setting direction: controls < cases
# Plot the ROC curve
plot(
  roc_obj,
  col = "blue",
  main = "ROC Curve - LightGBM (Cross Validation)",
  legacy.axes = TRUE,
  print.auc = TRUE,
  print.thres = TRUE,
  grid = c(0.2, 0.2),
  grid.col = c("green", "orange")
)
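
The 0.5 cutoff used for the confusion matrices is not tailored to the class imbalance. A short sketch, assuming pROC's coords() helper, that extracts the threshold maximizing Youden's J (the "best" point already printed on the plot above):

coords(roc_obj, "best", ret = c("threshold", "sensitivity", "specificity"))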
