diff --git a/pkg/caret/R/aaa.R b/pkg/caret/R/aaa.R index 91367a9f..88bd0613 100644 --- a/pkg/caret/R/aaa.R +++ b/pkg/caret/R/aaa.R @@ -94,7 +94,7 @@ if(getRversion() >= "2.15.1"){ ## nominalTrainWorkflow: no visible binding for global variable 'Resample' ## oobTrainWorkflow: no visible binding for global variable 'parm' ## - ## result <- foreach(iter = seq(along = resampleIndex), + ## result <- foreach(iter = seq(along.with = resampleIndex), ## .combine = "c", .verbose = FALSE, ## .packages = "caret", .errorhandling = "stop") %:% ## foreach(parm = 1:nrow(info$loop), .combine = "c", diff --git a/pkg/caret/R/adaptive.R b/pkg/caret/R/adaptive.R index 22bbe565..b25f1658 100644 --- a/pkg/caret/R/adaptive.R +++ b/pkg/caret/R/adaptive.R @@ -22,12 +22,12 @@ adaptiveWorkflow <- function(x, y, wts, info, method, ppOpts, ctrl, lev, pkgs <- c("methods", "caret") if(!is.null(method$library)) pkgs <- c(pkgs, method$library) - init_index <- seq(along = resampleIndex)[1:(ctrl$adaptive$min-1)] - extra_index <- seq(along = resampleIndex)[-(1:(ctrl$adaptive$min-1))] + init_index <- seq(along.with = resampleIndex)[1:(ctrl$adaptive$min-1)] + extra_index <- seq(along.with = resampleIndex)[-(1:(ctrl$adaptive$min-1))] keep_pred <- isTRUE(ctrl$savePredictions) || ctrl$savePredictions %in% c("all", "final") - init_result <- foreach(iter = seq(along = init_index), + init_result <- foreach(iter = seq(along.with = init_index), .combine = "c", .verbose = FALSE, .errorhandling = "stop") %:% @@ -91,14 +91,14 @@ adaptiveWorkflow <- function(x, y, wts, info, method, ppOpts, ctrl, lev, nPred <- length(holdoutIndex) if(!is.null(lev)) { predicted <- rep("", nPred) - predicted[seq(along = predicted)] <- NA + predicted[seq(along.with = predicted)] <- NA } else { predicted <- rep(NA, nPred) } if(!is.null(submod)) { tmp <- predicted predicted <- vector(mode = "list", length = nrow(info$submodels[[parm]]) + 1) - for(i in seq(along = predicted)) predicted[[i]] <- tmp + for(i in seq(along.with = predicted)) predicted[[i]] <- tmp rm(tmp) } } @@ -117,14 +117,14 @@ adaptiveWorkflow <- function(x, y, wts, info, method, ppOpts, ctrl, lev, nPred <- length(holdoutIndex) if(!is.null(lev)) { predicted <- rep("", nPred) - predicted[seq(along = predicted)] <- NA + predicted[seq(along.with = predicted)] <- NA } else { predicted <- rep(NA, nPred) } if(!is.null(submod)) { tmp <- predicted predicted <- vector(mode = "list", length = nrow(info$submodels[[parm]]) + 1) - for(i in seq(along = predicted)) predicted[[i]] <- tmp + for(i in seq(along.with = predicted)) predicted[[i]] <- tmp rm(tmp) } } @@ -145,7 +145,7 @@ adaptiveWorkflow <- function(x, y, wts, info, method, ppOpts, ctrl, lev, { tmp <- probValues probValues <- vector(mode = "list", length = nrow(info$submodels[[parm]]) + 1) - for(i in seq(along = probValues)) probValues[[i]] <- tmp + for(i in seq(along.with = probValues)) probValues[[i]] <- tmp rm(tmp) } } @@ -175,12 +175,12 @@ adaptiveWorkflow <- function(x, y, wts, info, method, ppOpts, ctrl, lev, ## same for the class probabilities if(ctrl$classProbs) { - for(k in seq(along = predicted)) predicted[[k]] <- cbind(predicted[[k]], probValues[[k]]) + for(k in seq(along.with = predicted)) predicted[[k]] <- cbind(predicted[[k]], probValues[[k]]) } if(keep_pred) { tmpPred <- predicted - for(modIndex in seq(along = tmpPred)) + for(modIndex in seq(along.with = tmpPred)) { tmpPred[[modIndex]]$rowIndex <- holdoutIndex tmpPred[[modIndex]] <- merge(tmpPred[[modIndex]], @@ -201,7 +201,7 @@ adaptiveWorkflow <- function(x, y, wts, info, method, ppOpts, 
ctrl, lev, if(length(lev) > 1) { cells <- lapply(predicted, function(x) flatTable(x$pred, x$obs)) - for(ind in seq(along = cells)) thisResample[[ind]] <- c(thisResample[[ind]], cells[[ind]]) + for(ind in seq(along.with = cells)) thisResample[[ind]] <- c(thisResample[[ind]], cells[[ind]]) } thisResample <- do.call("rbind", thisResample) thisResample <- cbind(allParam, thisResample) @@ -315,14 +315,14 @@ adaptiveWorkflow <- function(x, y, wts, info, method, ppOpts, ctrl, lev, nPred <- length(holdoutIndex) if(!is.null(lev)) { predicted <- rep("", nPred) - predicted[seq(along = predicted)] <- NA + predicted[seq(along.with = predicted)] <- NA } else { predicted <- rep(NA, nPred) } if(!is.null(submod)) { tmp <- predicted predicted <- vector(mode = "list", length = nrow(new_info$submodels[[parm]]) + 1) - for(i in seq(along = predicted)) predicted[[i]] <- tmp + for(i in seq(along.with = predicted)) predicted[[i]] <- tmp rm(tmp) } } @@ -341,14 +341,14 @@ adaptiveWorkflow <- function(x, y, wts, info, method, ppOpts, ctrl, lev, nPred <- length(holdoutIndex) if(!is.null(lev)) { predicted <- rep("", nPred) - predicted[seq(along = predicted)] <- NA + predicted[seq(along.with = predicted)] <- NA } else { predicted <- rep(NA, nPred) } if(!is.null(submod)) { tmp <- predicted predicted <- vector(mode = "list", length = nrow(new_info$submodels[[parm]]) + 1) - for(i in seq(along = predicted)) predicted[[i]] <- tmp + for(i in seq(along.with = predicted)) predicted[[i]] <- tmp rm(tmp) } } @@ -369,7 +369,7 @@ adaptiveWorkflow <- function(x, y, wts, info, method, ppOpts, ctrl, lev, { tmp <- probValues probValues <- vector(mode = "list", length = nrow(new_info$submodels[[parm]]) + 1) - for(i in seq(along = probValues)) probValues[[i]] <- tmp + for(i in seq(along.with = probValues)) probValues[[i]] <- tmp rm(tmp) } } @@ -400,12 +400,12 @@ adaptiveWorkflow <- function(x, y, wts, info, method, ppOpts, ctrl, lev, ## same for the class probabilities if(ctrl$classProbs) { - for(k in seq(along = predicted)) predicted[[k]] <- cbind(predicted[[k]], probValues[[k]]) + for(k in seq(along.with = predicted)) predicted[[k]] <- cbind(predicted[[k]], probValues[[k]]) } if(keep_pred) { tmpPred <- predicted - for(modIndex in seq(along = tmpPred)) + for(modIndex in seq(along.with = tmpPred)) { tmpPred[[modIndex]]$rowIndex <- holdoutIndex tmpPred[[modIndex]] <- merge(tmpPred[[modIndex]], @@ -426,7 +426,7 @@ adaptiveWorkflow <- function(x, y, wts, info, method, ppOpts, ctrl, lev, if(length(lev) > 1) { cells <- lapply(predicted, function(x) flatTable(x$pred, x$obs)) - for(ind in seq(along = cells)) thisResample[[ind]] <- c(thisResample[[ind]], cells[[ind]]) + for(ind in seq(along.with = cells)) thisResample[[ind]] <- c(thisResample[[ind]], cells[[ind]]) } thisResample <- do.call("rbind", thisResample) thisResample <- cbind(allParam, thisResample) @@ -541,7 +541,7 @@ adaptiveWorkflow <- function(x, y, wts, info, method, ppOpts, ctrl, lev, printed <- format(new_info$loop, digits = 4) colnames(printed) <- gsub("^\\.", "", colnames(printed)) - final_index <- seq(along = resampleIndex)[(last_iter+1):length(ctrl$index)] + final_index <- seq(along.with = resampleIndex)[(last_iter+1):length(ctrl$index)] final_result <- foreach(iter = final_index, .combine = "c", .verbose = FALSE) %:% @@ -604,14 +604,14 @@ adaptiveWorkflow <- function(x, y, wts, info, method, ppOpts, ctrl, lev, nPred <- length(holdoutIndex) if(!is.null(lev)) { predicted <- rep("", nPred) - predicted[seq(along = predicted)] <- NA + predicted[seq(along.with = predicted)] <- NA } 
else { predicted <- rep(NA, nPred) } if(!is.null(submod)) { tmp <- predicted predicted <- vector(mode = "list", length = nrow(info$submodels[[parm]]) + 1) - for(i in seq(along = predicted)) predicted[[i]] <- tmp + for(i in seq(along.with = predicted)) predicted[[i]] <- tmp rm(tmp) } } @@ -630,14 +630,14 @@ adaptiveWorkflow <- function(x, y, wts, info, method, ppOpts, ctrl, lev, nPred <- length(holdoutIndex) if(!is.null(lev)) { predicted <- rep("", nPred) - predicted[seq(along = predicted)] <- NA + predicted[seq(along.with = predicted)] <- NA } else { predicted <- rep(NA, nPred) } if(!is.null(submod)) { tmp <- predicted predicted <- vector(mode = "list", length = nrow(info$submodels[[parm]]) + 1) - for(i in seq(along = predicted)) predicted[[i]] <- tmp + for(i in seq(along.with = predicted)) predicted[[i]] <- tmp rm(tmp) } } @@ -658,7 +658,7 @@ adaptiveWorkflow <- function(x, y, wts, info, method, ppOpts, ctrl, lev, { tmp <- probValues probValues <- vector(mode = "list", length = nrow(info$submodels[[parm]]) + 1) - for(i in seq(along = probValues)) probValues[[i]] <- tmp + for(i in seq(along.with = probValues)) probValues[[i]] <- tmp rm(tmp) } } @@ -689,12 +689,12 @@ adaptiveWorkflow <- function(x, y, wts, info, method, ppOpts, ctrl, lev, ## same for the class probabilities if(ctrl$classProbs) { - for(k in seq(along = predicted)) predicted[[k]] <- cbind(predicted[[k]], probValues[[k]]) + for(k in seq(along.with = predicted)) predicted[[k]] <- cbind(predicted[[k]], probValues[[k]]) } if(keep_pred) { tmpPred <- predicted - for(modIndex in seq(along = tmpPred)) + for(modIndex in seq(along.with = tmpPred)) { tmpPred[[modIndex]]$rowIndex <- holdoutIndex tmpPred[[modIndex]] <- merge(tmpPred[[modIndex]], @@ -715,7 +715,7 @@ adaptiveWorkflow <- function(x, y, wts, info, method, ppOpts, ctrl, lev, if(length(lev) > 1) { cells <- lapply(predicted, function(x) flatTable(x$pred, x$obs)) - for(ind in seq(along = cells)) thisResample[[ind]] <- c(thisResample[[ind]], cells[[ind]]) + for(ind in seq(along.with = cells)) thisResample[[ind]] <- c(thisResample[[ind]], cells[[ind]]) } thisResample <- do.call("rbind", thisResample) thisResample <- cbind(allParam, thisResample) diff --git a/pkg/caret/R/additive.R b/pkg/caret/R/additive.R index 77461dc5..4232a71a 100644 --- a/pkg/caret/R/additive.R +++ b/pkg/caret/R/additive.R @@ -10,11 +10,11 @@ additivePlot <- function(x, data, n = 100, quant = 0, plot = TRUE, ...) 
function(x, len, q) list(seq = seq( quantile(x, na.rm = TRUE, probs = q), quantile(x, na.rm = TRUE, probs = 1 - q), - length = len), + length.out = len), var = ""), len = n, q = quant) - for(i in seq(along = seqs)) seqs[[i]]$var <- colnames(data)[i] + for(i in seq(along.with = seqs)) seqs[[i]]$var <- colnames(data)[i] meds <- lapply(data, function(x, len) rep(median(x, na.rm = TRUE), len), len = n) diff --git a/pkg/caret/R/avNNet.R b/pkg/caret/R/avNNet.R index d2a1ec0e..5cfc2c92 100644 --- a/pkg/caret/R/avNNet.R +++ b/pkg/caret/R/avNNet.R @@ -113,7 +113,7 @@ avNNet.default <- function(x, y, repeats = 5, ## check for factors ## this is from nnet.formula - ind <- seq(along = y) + ind <- seq(along.with = y) if(is.factor(y)) { classLev <- levels(y) diff --git a/pkg/caret/R/bag.R b/pkg/caret/R/bag.R index ba7fe470..4505bf86 100644 --- a/pkg/caret/R/bag.R +++ b/pkg/caret/R/bag.R @@ -133,7 +133,7 @@ bagControl <- function( { freaks <- table(subY) smallFreak <- min(freaks) - splitUp <- split(seq(along = subY), subY) + splitUp <- split(seq(along.with = subY), subY) splitUp <- lapply(splitUp, sample, size = smallFreak) @@ -173,7 +173,7 @@ bagControl <- function( btSamples <- createResample(y, times = B) `%op%` <- if(bagControl$allowParallel) `%dopar%` else `%do%` - btFits <- foreach(iter = seq(along = btSamples), + btFits <- foreach(iter = seq(along.with = btSamples), .verbose = FALSE, .packages = "caret", .errorhandling = "stop") %op% diff --git a/pkg/caret/R/bagEarth.R b/pkg/caret/R/bagEarth.R index 5add1102..67b638a9 100644 --- a/pkg/caret/R/bagEarth.R +++ b/pkg/caret/R/bagEarth.R @@ -260,7 +260,7 @@ requireNamespaceQuietStop("earth") ## get oob predictions getTrainPred <- function(x) { - oobIndex <- seq(along = x$fitted.values) + oobIndex <- seq(along.with = x$fitted.values) oobIndex <- oobIndex[!(oobIndex %in% unique(x$index))] data.frame(pred = x$fitted.values[oobIndex], sample = oobIndex) diff --git a/pkg/caret/R/bagFDA.R b/pkg/caret/R/bagFDA.R index 1ec276e8..eac4270d 100644 --- a/pkg/caret/R/bagFDA.R +++ b/pkg/caret/R/bagFDA.R @@ -204,7 +204,7 @@ function(object, newdata = NULL, type = "class", ...) } pred <- rbind.fill(pred) out <- ddply(pred, .(sample), - function(x) colMeans(x[,seq(along = object$levels)], na.rm = TRUE)) + function(x) colMeans(x[,seq(along.with = object$levels)], na.rm = TRUE)) out <- out[,-1,drop = FALSE] rownames(out) <- rownames(newdata) predClass <- object$levels[apply(out, 1, which.max)] diff --git a/pkg/caret/R/classDist.R b/pkg/caret/R/classDist.R index 98ae1563..cbe1fd3d 100644 --- a/pkg/caret/R/classDist.R +++ b/pkg/caret/R/classDist.R @@ -75,7 +75,7 @@ classDist.default <- function(x, y, groups = 5, if(is.numeric(y)) { y <- cut(y, - unique(quantile(y, probs = seq(0, 1, length = groups + 1))), + unique(quantile(y, probs = seq(0, 1, length.out = groups + 1))), include.lowest = TRUE) classLabels <- paste(round((1:groups)/groups*100, 2)) y <- factor(y) diff --git a/pkg/caret/R/classLevels.R b/pkg/caret/R/classLevels.R index d2f2bb4b..62dfd86f 100644 --- a/pkg/caret/R/classLevels.R +++ b/pkg/caret/R/classLevels.R @@ -13,7 +13,7 @@ levels.train <- function(x, ...) { } else code <- x$modelInfo if(!is.null(code$levels)){ checkInstall(code$library) - for(i in seq(along = code$library)) + for(i in seq(along.with = code$library)) do.call("requireNamespaceQuietStop", list(package = code$library[i])) out <- code$levels(x$finalModel, ...) 
} else out <- NULL diff --git a/pkg/caret/R/confusionMatrix.R b/pkg/caret/R/confusionMatrix.R index ceea8fc4..b75437c8 100644 --- a/pkg/caret/R/confusionMatrix.R +++ b/pkg/caret/R/confusionMatrix.R @@ -281,7 +281,7 @@ confusionMatrix.table <- function(data, positive = NULL, tableStats <- matrix(NA, nrow = length(classLevels), ncol = 11) - for(i in seq(along = classLevels)) { + for(i in seq(along.with = classLevels)) { pos <- classLevels[i] neg <- classLevels[!(classLevels %in% classLevels[i])] prev <- if(is.null(prevalence)) sum(data[, pos])/sum(data) else prevalence[pos] diff --git a/pkg/caret/R/createDataPartition.R b/pkg/caret/R/createDataPartition.R index 6d594a02..4eb78cf4 100644 --- a/pkg/caret/R/createDataPartition.R +++ b/pkg/caret/R/createDataPartition.R @@ -116,7 +116,7 @@ createDataPartition <- function (y, times = 1, p = 0.5, list = TRUE, groups = mi if(is.numeric(y)) { y <- cut(y, - unique(quantile(y, probs = seq(0, 1, length = groups))), + unique(quantile(y, probs = seq(0, 1, length.out = groups))), include.lowest = TRUE) } else { xtab <- table(y) @@ -144,7 +144,7 @@ createDataPartition <- function (y, times = 1, p = 0.5, list = TRUE, groups = mi } for (j in 1:times) { - tmp <- dlply(data.frame(y = y, index = seq(along = y)), + tmp <- dlply(data.frame(y = y, index = seq(along.with = y)), .(y), subsample, p = p) tmp <- sort(as.vector(unlist(tmp))) out[[j]] <- tmp @@ -180,7 +180,7 @@ createDataPartition <- function (y, times = 1, p = 0.5, list = TRUE, groups = mi cuts <- floor(length(y)/k) if(cuts < 2) cuts <- 2 if(cuts > 5) cuts <- 5 - breaks <- unique(quantile(y, probs = seq(0, 1, length = cuts))) + breaks <- unique(quantile(y, probs = seq(0, 1, length.out = cuts))) y <- cut(y, breaks, include.lowest = TRUE) } @@ -212,12 +212,12 @@ createDataPartition <- function (y, times = 1, p = 0.5, list = TRUE, groups = mi foldVector[which(y == names(numInClass)[i])] <- sample(1:k, size = numInClass[i]) } } - } else foldVector <- seq(along = y) + } else foldVector <- seq(along.with = y) if(list) { - out <- split(seq(along = y), foldVector) - names(out) <- paste("Fold", gsub(" ", "0", format(seq(along = out))), sep = "") - if(returnTrain) out <- lapply(out, function(data, y) y[-data], y = seq(along = y)) + out <- split(seq(along.with = y), foldVector) + names(out) <- paste("Fold", gsub(" ", "0", format(seq(along.with = out))), sep = "") + if(returnTrain) out <- lapply(out, function(data, y) y[-data], y = seq(along.with = y)) } else out <- foldVector out } @@ -230,7 +230,7 @@ createMultiFolds <- function(y, k = 10, times = 5) { for(i in 1:times) { tmp <- createFolds(y, k = k, list = TRUE, returnTrain = TRUE) names(tmp) <- paste("Fold", - gsub(" ", "0", format(seq(along = tmp))), + gsub(" ", "0", format(seq(along.with = tmp))), ".", prettyNums[i], sep = "") @@ -292,8 +292,8 @@ make_resamples <- function(ctrl_obj, outcome) { ctrl_obj$index <- switch(tolower(ctrl_obj$method), oob = NULL, - none = list(seq(along = outcome)), - apparent = list(all = seq(along = outcome)), + none = list(seq(along.with = outcome)), + apparent = list(all = seq(along.with = outcome)), alt_cv =, cv = createFolds(outcome, ctrl_obj$number, returnTrain = TRUE), repeatedcv =, adaptive_cv = createMultiFolds(outcome, ctrl_obj$number, ctrl_obj$repeats), loocv = createFolds(outcome, n, returnTrain = TRUE), @@ -301,7 +301,7 @@ make_resamples <- function(ctrl_obj, outcome) { adaptive_boot = createResample(outcome, ctrl_obj$number), test = createDataPartition(outcome, 1, ctrl_obj$p), adaptive_lgocv =, lgocv = 
createDataPartition(outcome, ctrl_obj$number, ctrl_obj$p), - timeslice = createTimeSlices(seq(along = outcome), + timeslice = createTimeSlices(seq(along.with = outcome), initialWindow = ctrl_obj$initialWindow, horizon = ctrl_obj$horizon, fixedWindow = ctrl_obj$fixedWindow, @@ -319,7 +319,7 @@ make_resamples <- function(ctrl_obj, outcome) { } if(ctrl_obj$method == "apparent") - ctrl_obj$indexOut <- list(all = seq(along = outcome)) + ctrl_obj$indexOut <- list(all = seq(along.with = outcome)) ## Create holdout indices if(is.null(ctrl_obj$indexOut) && ctrl_obj$method != "oob"){ @@ -328,7 +328,7 @@ make_resamples <- function(ctrl_obj, outcome) { if (inherits(outcome, "Surv")) 1:nrow(outcome) else - seq(along = outcome) + seq(along.with = outcome) ctrl_obj$indexOut <- lapply(ctrl_obj$index, function(training) setdiff(y_index, training)) @@ -340,7 +340,7 @@ make_resamples <- function(ctrl_obj, outcome) { names(ctrl_obj$indexOut) <- prettySeq(ctrl_obj$indexOut) } else { ctrl_obj$indexOut <- - createTimeSlices(seq(along = outcome), + createTimeSlices(seq(along.with = outcome), initialWindow = ctrl_obj$initialWindow, horizon = ctrl_obj$horizon, fixedWindow = ctrl_obj$fixedWindow, diff --git a/pkg/caret/R/createResample.R b/pkg/caret/R/createResample.R index 91ea02c8..3f855002 100644 --- a/pkg/caret/R/createResample.R +++ b/pkg/caret/R/createResample.R @@ -8,7 +8,7 @@ createResample <- function(y, times = 10, list = TRUE) { out <- apply( trainIndex, 2, function(data) { - index <- seq(along = data) + index <- seq(along.with = data) out <- sort(sample(index, size = length(index), replace = TRUE)) out diff --git a/pkg/caret/R/extractPrediction.R b/pkg/caret/R/extractPrediction.R index 3712742d..a2a7d066 100644 --- a/pkg/caret/R/extractPrediction.R +++ b/pkg/caret/R/extractPrediction.R @@ -31,7 +31,7 @@ extractPrediction <- function(models, if(verbose) cat("There were ", sum(hasNa), "rows with missing values\n\n") } - for(i in seq(along = models)) + for(i in seq(along.with = models)) { if(!unkOnly) { diff --git a/pkg/caret/R/extractProb.R b/pkg/caret/R/extractProb.R index 8c13fb0c..0a70f26d 100644 --- a/pkg/caret/R/extractProb.R +++ b/pkg/caret/R/extractProb.R @@ -38,7 +38,7 @@ extractProb <- function(models, if(verbose) cat("There were ", sum(hasNa), "rows with missing values\n\n"); flush.console() } - for(i in seq(along = models)) + for(i in seq(along.with = models)) { if(verbose) cat("starting ", models[[i]]$method, "\n"); flush.console() if(!unkOnly) { diff --git a/pkg/caret/R/featurePlot.R b/pkg/caret/R/featurePlot.R index a2187e31..86b92220 100644 --- a/pkg/caret/R/featurePlot.R +++ b/pkg/caret/R/featurePlot.R @@ -66,7 +66,7 @@ function(x, y, lineInfo <- trellis.par.get("superpose.line") pointInfo <- trellis.par.get("superpose.symbol") uniqueGroups <- sort(unique(groups)) - for (i in seq(along=uniqueGroups)) + for (i in seq(along.with=uniqueGroups)) { id <- which(groups[subscripts] == uniqueGroups[i]) panel.xyplot(x[id], y[id], pch = pointInfo$pch[i], diff --git a/pkg/caret/R/gafs.R b/pkg/caret/R/gafs.R index c41afcfb..96bcc76d 100644 --- a/pkg/caret/R/gafs.R +++ b/pkg/caret/R/gafs.R @@ -136,7 +136,7 @@ ga_func_check <- function(x) { #' @export gafs_initial gafs_initial <- function (vars, popSize, ...) { x <- matrix(NA, nrow = popSize, ncol = vars) - probs <- seq(.9, .1, length = popSize) + probs <- seq(.9, .1, length.out = popSize) for(i in 1:popSize){ x[i,] <- sample(0:1, replace = TRUE, size = vars, @@ -997,7 +997,7 @@ gafs <- function (x, ...) 
UseMethod("gafs") gafsControl$indexOut <- lapply(gafsControl$index, function(training, allSamples) allSamples[-unique(training)], - allSamples = seq(along = y) + allSamples = seq(along.with = y) ) names(gafsControl$indexOut) <- getFromNamespace("prettySeq", "caret")(gafsControl$indexOut) @@ -1017,7 +1017,7 @@ gafs <- function (x, ...) UseMethod("gafs") obs = sample(y, min(10, length(y)))) if(is.factor(y)) - for(i in seq(along = classLevels)) + for(i in seq(along.with = classLevels)) testOutput[, classLevels[i]] <- runif(nrow(testOutput)) test <- gafsControl$functions$fitness_extern(testOutput, lev = classLevels) @@ -1041,7 +1041,7 @@ gafs <- function (x, ...) UseMethod("gafs") result <- foreach( - i = seq(along = gafsControl$index), + i = seq(along.with = gafsControl$index), .combine = "c", .verbose = FALSE, .errorhandling = "stop") %op% { ga_select( @@ -1094,9 +1094,9 @@ gafs <- function (x, ...) UseMethod("gafs") in_holdout <- createDataPartition(y, p = gafsControl$holdout, list = FALSE) - in_model <- seq(along = y)[-unique(in_holdout)] + in_model <- seq(along.with = y)[-unique(in_holdout)] } else { - in_model <- seq(along = y) + in_model <- seq(along.with = y) in_holdout <- NULL } final_ga <- ga_select( @@ -1518,7 +1518,7 @@ update.gafs <- function(object, iter, x, y, ...) { gafsControl$indexOut <- lapply(gafsControl$index, function(training, allSamples) allSamples[-unique(training)], - allSamples = seq(along = y) + allSamples = seq(along.with = y) ) names(gafsControl$indexOut) <- getFromNamespace("prettySeq", "caret")(gafsControl$indexOut) @@ -1538,7 +1538,7 @@ update.gafs <- function(object, iter, x, y, ...) { obs = sample(y, min(10, length(y)))) if(is.factor(y)) - for(i in seq(along = classLevels)) + for(i in seq(along.with = classLevels)) testOutput[, classLevels[i]] <- runif(nrow(testOutput)) if(!is.null(perf_data)) testOutput <- cbind( @@ -1567,7 +1567,7 @@ update.gafs <- function(object, iter, x, y, ...) { result <- foreach( - i = seq(along = gafsControl$index), + i = seq(along.with = gafsControl$index), .combine = "c", .verbose = FALSE, .errorhandling = "stop") %op% { ga_select( @@ -1622,9 +1622,9 @@ update.gafs <- function(object, iter, x, y, ...) { in_holdout <- createDataPartition(y, p = gafsControl$holdout, list = FALSE) - in_model <- seq(along = y)[-unique(in_holdout)] + in_model <- seq(along.with = y)[-unique(in_holdout)] } else { - in_model <- seq(along = y) + in_model <- seq(along.with = y) in_holdout <- NULL } final_ga <- ga_select( diff --git a/pkg/caret/R/ggplot.R b/pkg/caret/R/ggplot.R index 1e239ae0..4516a611 100644 --- a/pkg/caret/R/ggplot.R +++ b/pkg/caret/R/ggplot.R @@ -181,7 +181,7 @@ random_search_plot <- function(x, metric = x$metric[1]) { p_names <- as.character(params$parameter) exclude <- NULL - for(i in seq(along = p_names)) { + for(i in seq(along.with = p_names)) { if(all(is.na(x$results[, p_names[i]]))) exclude <- c(exclude, i) } diff --git a/pkg/caret/R/heldout.R b/pkg/caret/R/heldout.R index ebf0b042..f4c589bf 100644 --- a/pkg/caret/R/heldout.R +++ b/pkg/caret/R/heldout.R @@ -106,7 +106,7 @@ oob_pred.list <- function(x, direction = "wide", what = "both", ...) 
{ nms <- names(oob) if(is.null(nms)) nms <- well_numbered("Model", length(oob)) - for(i in seq(along = nms)) oob[[i]]$.label <- nms[i] + for(i in seq(along.with = nms)) oob[[i]]$.label <- nms[i] oob <- rbind.fill(oob) if(length(table(table(oob$n))) > 1) stop("Some averages have different sample sizes than others") diff --git a/pkg/caret/R/learning_curve.R b/pkg/caret/R/learning_curve.R index 64fbd6d2..2852ef96 100644 --- a/pkg/caret/R/learning_curve.R +++ b/pkg/caret/R/learning_curve.R @@ -79,7 +79,7 @@ learning_curve_dat <- function(dat, resampled <- vector(mode = "list", length = n_size) tested <- if(test_prop > 0) resampled else NULL apparent <- resampled - for(i in seq(along = proportion)) { + for(i in seq(along.with = proportion)) { if(verbose) cat("Training for ", round(proportion[i]*100, 1), "% (n = ", floor(n*proportion[i]), ")\n", sep = "") in_mod <- if(proportion[i] < 1) sample(for_model, size = floor(n*proportion[i])) else for_model diff --git a/pkg/caret/R/lift.R b/pkg/caret/R/lift.R index 6f2cd1dd..e397750b 100644 --- a/pkg/caret/R/lift.R +++ b/pkg/caret/R/lift.R @@ -162,7 +162,7 @@ lift.formula <- function(x, data = NULL, if(!is.null(labels)) { plotData$originalName <- plotData$liftModelVar plotData$liftModelVar <- as.character(plotData$liftModelVar) - for(i in seq(along = labels)) plotData$liftModelVar[plotData$liftModelVar == names(labels)[i]] <- labels[i] + for(i in seq(along.with = labels)) plotData$liftModelVar[plotData$liftModelVar == names(labels)[i]] <- labels[i] plotData$liftModelVar <- factor(plotData$liftModelVar, levels = labels) } @@ -233,7 +233,7 @@ liftCalc <- function(x, class = levels(x$liftClassVar)[1], cuts = NULL) { baseline <- mean(x$liftClassVar == class) if(!is.null(cuts)) { if(length(cuts) == 1) { - cuts <- rev(seq(0, 1, length = cuts)) + cuts <- rev(seq(0, 1, length.out = cuts)) } else { cuts <- unique(c(1, sort(cuts, decreasing = TRUE), 0)) } @@ -249,7 +249,7 @@ liftCalc <- function(x, class = levels(x$liftClassVar)[1], cuts = NULL) { n = NA, Sn = NA, Sp = NA) - for(i in seq(along = cuts)) { + for(i in seq(along.with = cuts)) { sub <- x$liftClassVar[x$liftProbVar >= tmp$cuts[i]] tmp$n[i] <- length(sub) tmp$events[i] <- sum(sub == class) @@ -342,7 +342,7 @@ panel.lift2 <- function (x, y, pct = 0, values = NULL, ...) { if(any(names(theDots) == "groups")) { dat <- data.frame(x = x, y = y, groups = theDots$groups) ung <- unique(dat$groups) - for(i in seq(along = ung)) { + for(i in seq(along.with = ung)) { dat0 <- subset(dat, groups == ung[i]) plotRef(dat0$x, dat0$y, values, iter = i) } @@ -464,7 +464,7 @@ get_ref_point <- function(dat, v, window = 5) { res <- data.frame(CumEventPct = v, CumTestedPct = NA) - for(i in seq(along = v)) { + for(i in seq(along.with = v)) { nearest <- which.min((y - v[i])^2) index <- max(1, nearest - window):min(length(y), nearest + window) res$CumTestedPct[i] <- diff --git a/pkg/caret/R/maxDissim.R b/pkg/caret/R/maxDissim.R index 4baf18fc..3e23c2fc 100644 --- a/pkg/caret/R/maxDissim.R +++ b/pkg/caret/R/maxDissim.R @@ -61,7 +61,7 @@ #' xlab = "variable 1", ylab = "variable 2") #' points(base, pch = 16, cex = .7) #' -#' for(i in seq(along = newSamp)) +#' for(i in seq(along.with = newSamp)) #' points( #' pool[newSamp[i],1], #' pool[newSamp[i],2], @@ -167,11 +167,11 @@ splitByDissim <- function(x, p = .8, y = NULL, start = NULL, ...) 
if(!is.factor(y)) y <- as.factor(y) lvl <- levels(y) - ind <- split(seq(along = y), y) - ind2 <- lapply(ind, function(x) seq(along = x)) + ind <- split(seq(along.with = y), y) + ind2 <- lapply(ind, function(x) seq(along.with = x)) start2 <- lapply(ind, function(x, start) which(x %in% start), start = start) - for(i in seq(along = lvl)) + for(i in seq(along.with = lvl)) { tmp <- splitter(x[ind[[i]],, drop = FALSE], p = p, diff --git a/pkg/caret/R/misc.R b/pkg/caret/R/misc.R index 6e752bcf..6e5c1f22 100644 --- a/pkg/caret/R/misc.R +++ b/pkg/caret/R/misc.R @@ -1,5 +1,5 @@ subsemble_index <- function(y, J = 2, V = 10){ - dat <- data.frame(y = y, index = seq(along = y)) + dat <- data.frame(y = y, index = seq(along.with = y)) outer_index <- sample(1:J, size = nrow(dat), replace = TRUE) outer_splits <- vector(mode = "list", length = J) for(i in 1:J) { @@ -15,7 +15,7 @@ subsemble_index <- function(y, J = 2, V = 10){ } all_index <- lapply(outer_splits, foo, V = V) model_index <- holdout_index <- NULL - for(i in seq(along = all_index)) { + for(i in seq(along.with = all_index)) { model_index <- c(model_index, all_index[[i]]$model) holdout_index <- c(holdout_index, all_index[[i]]$holdout) } @@ -57,7 +57,7 @@ evalSummaryFunction <- function(y, wts = NULL, perf = NULL, ctrl, lev, metric, m } if(ctrl$classProbs) { - for(i in seq(along = lev)) testOutput[, lev[i]] <- runif(nrow(testOutput)) + for(i in seq(along.with = lev)) testOutput[, lev[i]] <- runif(nrow(testOutput)) testOutput[, lev] <- t(apply(testOutput[, lev], 1, function(x) x/sum(x))) } else { if(metric == "ROC" & !ctrl$classProbs) @@ -145,12 +145,12 @@ flatTable <- function(pred, obs) { cells <- as.vector(table(pred, obs)) if(length(cells) == 0) cells <- rep(NA, length(levels(obs))^2) - names(cells) <- paste(".cell", seq(along= cells), sep = "") + names(cells) <- paste(".cell", seq(along.with= cells), sep = "") cells } -prettySeq <- function(x) paste("Resample", gsub(" ", "0", format(seq(along = x))), sep = "") +prettySeq <- function(x) paste("Resample", gsub(" ", "0", format(seq(along.with = x))), sep = "") #' @rdname caret-internal #' @export @@ -196,12 +196,12 @@ partRuleSummary <- function(x) classPred <- grep("\\)$", conditions, value = TRUE) varUsage <- data.frame(Var = predictors, Overall = 0) - for(i in seq(along = predictors)) + for(i in seq(along.with = predictors)) varUsage$Overall[i] <- sum(grepl(paste("^", predictors[i], sep = ""), conditions)) numClass <- rep(NA, length(classes)) names(numClass) <- classes - for(i in seq(along = classes)) + for(i in seq(along.with = classes)) numClass[i] <- sum(grepl(paste(":", classes[i], sep = " "), classPred)) list(varUsage = varUsage, @@ -222,12 +222,12 @@ ripperRuleSummary <- function(x) conditions <- grep("(<=|>=|<|>|=)", rules, value = TRUE) varUsage <- data.frame(Var = predictors, Overall = 0) - for(i in seq(along = predictors)) + for(i in seq(along.with = predictors)) varUsage$Overall[i] <- sum(grepl(paste("\\(", predictors[i], sep = ""), conditions)) numClass <- rep(NA, length(classes)) names(numClass) <- classes - for(i in seq(along = classes)) + for(i in seq(along.with = classes)) numClass[i] <- sum(grepl(paste(x$terms[[2]], "=", classes[i], sep = ""), conditions)) list(varUsage = varUsage, @@ -255,7 +255,7 @@ repList <- function(x, times = 3, addIndex = FALSE) { out <- vector(mode = "list", length = times) out <- lapply(out, function(a, b) b, b = x) - if(addIndex) for(i in seq(along = out)) out[[i]]$.index <- i + if(addIndex) for(i in seq(along.with = out)) out[[i]]$.index <- i out } @@ 
-400,10 +400,10 @@ var_seq <- function(p, classification = FALSE, len = 3) { } else { if(p <= len) { - tuneSeq <- floor(seq(2, to = p, length = p)) + tuneSeq <- floor(seq(2, to = p, length.out = p)) } else { - if(p < 500 ) tuneSeq <- floor(seq(2, to = p, length = len)) - else tuneSeq <- floor(2^seq(1, to = log(p, base = 2), length = len)) + if(p < 500 ) tuneSeq <- floor(seq(2, to = p, length.out = len)) + else tuneSeq <- floor(2^seq(1, to = log(p, base = 2), length.out = len)) } } if(any(table(tuneSeq) > 1)) { @@ -629,14 +629,14 @@ fill_failed_pred <- function(index, lev, submod){ nPred <- length(index) if(!is.null(lev)) { predicted <- rep("", nPred) - predicted[seq(along = predicted)] <- NA + predicted[seq(along.with = predicted)] <- NA } else { predicted <- rep(NA, nPred) } if(!is.null(submod)) { tmp <- predicted predicted <- vector(mode = "list", length = nrow(submod) + 1) - for(i in seq(along = predicted)) predicted[[i]] <- tmp + for(i in seq(along.with = predicted)) predicted[[i]] <- tmp rm(tmp) } predicted diff --git a/pkg/caret/R/modelLookup.R b/pkg/caret/R/modelLookup.R index bc098a8f..bcbb1e93 100644 --- a/pkg/caret/R/modelLookup.R +++ b/pkg/caret/R/modelLookup.R @@ -71,7 +71,7 @@ modelLookup <- function(model = NULL){ out$probModel <- !is.null(x$prob) out }) - for(i in seq(along = out)) out[[i]]$model <- names(models)[i] + for(i in seq(along.with = out)) out[[i]]$model <- names(models)[i] out <- do.call("rbind", out) rownames(out) <- NULL out <- out[, c('model', 'parameter', 'label', 'forReg', 'forClass', 'probModel')] @@ -90,7 +90,7 @@ missing_packages <- function(mods = getModelInfo()) { #' @export checkInstall <- function(pkg){ good <- rep(TRUE, length(pkg)) - for(i in seq(along = pkg)){ + for(i in seq(along.with = pkg)){ tested <- try(find.package(pkg[i]), silent = TRUE) if (inherits(tested, "try-error")) good[i] <- FALSE } diff --git a/pkg/caret/R/panel.needle.R b/pkg/caret/R/panel.needle.R index c9862967..6b9b680d 100644 --- a/pkg/caret/R/panel.needle.R +++ b/pkg/caret/R/panel.needle.R @@ -43,7 +43,7 @@ pch <- rep(pch, length(x)) pch <- ifelse(x == 0, NA, pch) - for(i in seq(along=x)) lsegments(x[i], y[i], 0, y[i]) + for(i in seq(along.with=x)) lsegments(x[i], y[i], 0, y[i]) if (is.null(groups)) panel.xyplot(x = x, y = y, col = col, pch = pch, ...) else panel.superpose(x = x, y = y, groups = groups, col = col, pch = pch, ...) 
diff --git a/pkg/caret/R/plsda.R b/pkg/caret/R/plsda.R index 9ac89934..4186347f 100644 --- a/pkg/caret/R/plsda.R +++ b/pkg/caret/R/plsda.R @@ -58,7 +58,7 @@ #' \dontrun{ #' data(mdrr) #' set.seed(1) -#' inTrain <- sample(seq(along = mdrrClass), 450) +#' inTrain <- sample(seq(along.with = mdrrClass), 450) #' #' nzv <- nearZeroVar(mdrrDescr) #' filteredDescr <- mdrrDescr[, -nzv] @@ -173,7 +173,7 @@ predict.plsda <- function(object, newdata = NULL, ncomp = NULL, type = "class", requireNamespaceQuietStop("klaR") tmp <- vector(mode = "list", length = length(ncomp)) - for(i in seq(along = ncomp)) { + for(i in seq(along.with = ncomp)) { tmp[[i]] <- predict(object$probModel[[ ncomp[i] ]], as.data.frame(tmpPred[,-length(object$obsLevels),i]), stringsAsFactors = TRUE) } @@ -196,7 +196,7 @@ predict.plsda <- function(object, newdata = NULL, ncomp = NULL, type = "class", rownames(tmp[[1]]$posterior), colnames(tmp[[1]]$posterior), paste("ncomp", ncomp, sep = ""))) - for(i in seq(along = ncomp)) out[,,i] <- tmp[[i]]$posterior + for(i in seq(along.with = ncomp)) out[,,i] <- tmp[[i]]$posterior } } out diff --git a/pkg/caret/R/preProcess.R b/pkg/caret/R/preProcess.R index e168af1f..c45aa41d 100644 --- a/pkg/caret/R/preProcess.R +++ b/pkg/caret/R/preProcess.R @@ -337,7 +337,7 @@ preProcess.default <- function(x, method = c("center", "scale"), } # now apply to current data if(length(yj) > 0) { - for(i in seq(along = yj)) { + for(i in seq(along.with = yj)) { who <- names(yj)[i] x[,who] <- recipes::yj_transform(x[,who], yj[who]) } @@ -557,7 +557,7 @@ predict.preProcess <- function(object, newdata, ...) { lam <- get_yj_lambda(object$yj) lam <- lam[!is.na(lam)] if(length(lam) > 0) { - for(i in seq(along = lam)) { + for(i in seq(along.with = lam)) { who <- names(lam)[i] newdata[,who] <- recipes::yj_transform(newdata[,who], lam[who]) } @@ -565,7 +565,7 @@ predict.preProcess <- function(object, newdata, ...) { } if(!is.null(object$et)) { - for(i in seq(along = object$et)) { + for(i in seq(along.with = object$et)) { who <- names(object$et)[i] newdata[,who] <- predict(object$et[[who]], newdata[,who]) } @@ -623,7 +623,7 @@ predict.preProcess <- function(object, newdata, ...) { missingVars <- names(missingVars)[missingVars] ## ipred's bagging procedure only allows for data frames if(!is.data.frame(hasMiss)) hasMiss <- as.data.frame(hasMiss, stringsAsFactors = TRUE) - for(i in seq(along = missingVars)) { + for(i in seq(along.with = missingVars)) { preds <- predict(object$bagImp[[missingVars[i]]]$model, hasMiss[, !colnames(hasMiss) %in% missingVars[i], drop = FALSE]) diff --git a/pkg/caret/R/predict.PLS.R b/pkg/caret/R/predict.PLS.R index 1224de33..e3cc4951 100644 --- a/pkg/caret/R/predict.PLS.R +++ b/pkg/caret/R/predict.PLS.R @@ -26,7 +26,7 @@ predict.PLS <- function(object, newdata, dnB[[1]] <- c("(Intercept)", dnB[[1]]) BInt <- array(dim = dB, dimnames = dnB) BInt[-1, , ] <- B - for (i in seq(along = 1:ncomp)) BInt[1, , i] <- object$Ymeans - object$Xmeans %*% B[, , i] + for (i in seq(along.with = 1:ncomp)) BInt[1, , i] <- object$Ymeans - object$Xmeans %*% B[, , i] B <- BInt # stop diff --git a/pkg/caret/R/predictors.R b/pkg/caret/R/predictors.R index 99555c22..3396b8e7 100644 --- a/pkg/caret/R/predictors.R +++ b/pkg/caret/R/predictors.R @@ -37,7 +37,7 @@ predictors.train <- function(x, ...) 
{ } else code <- x$modelInfo if(!is.null(code$predictors)){ checkInstall(code$library) - for(i in seq(along = code$library)) + for(i in seq(along.with = code$library)) do.call("requireNamespaceQuietStop", list(package = code$library[i])) out <- code$predictors(x$finalModel, ...) } else { @@ -56,7 +56,7 @@ predictors.default <- function(x, ...) { if(!is.null(code)) { if(!is.null(code$predictors)){ checkInstall(code$library) - for(i in seq(along = code$library)) + for(i in seq(along.with = code$library)) do.call("requireNamespaceQuietStop", list(package = code$library[i])) out <- code$predictors(x, ...) } else { @@ -91,7 +91,7 @@ hasTerms <- function(x) basicVars <- function(x, y) { hasVar <- rep(NA, length(x)) - for(i in seq(along = x)) + for(i in seq(along.with = x)) hasVar[i] <- length(grep(x[i], y, fixed = TRUE)) > 0 x[hasVar] } diff --git a/pkg/caret/R/print.train.R b/pkg/caret/R/print.train.R index cb0346f6..33b1dcb5 100644 --- a/pkg/caret/R/print.train.R +++ b/pkg/caret/R/print.train.R @@ -192,7 +192,7 @@ stringFunc <- function (x) { numVals <- apply(tuneAcc[, params, drop = FALSE], 2, function(x) length(unique(x))) if(any(numVals < 2)) { constString <- NULL - for(i in seq(along = numVals)) { + for(i in seq(along.with = numVals)) { if(numVals[i] == 1) constString <- c(constString, paste0("Tuning parameter '", @@ -210,7 +210,7 @@ stringFunc <- function (x) { colnames(tuneAcc)[colnames(tuneAcc) == ".B"] <- "Resamples" nms <- names(tuneAcc)[names(tuneAcc) %in% params] sort_args <- vector(mode = "list", length = length(nms)) - for(i in seq(along = nms)) { + for(i in seq(along.with = nms)) { sort_args[[i]] <- tuneAcc[, nms[i]] } tune_ord <- do.call("order", sort_args) diff --git a/pkg/caret/R/resamples.R b/pkg/caret/R/resamples.R index b9ce2223..2d5c5583 100644 --- a/pkg/caret/R/resamples.R +++ b/pkg/caret/R/resamples.R @@ -142,7 +142,7 @@ resamples.default <- function(x, modelNames = names(x), ...) { } rs_values <- vector(mode = "list", length = length(x)) - for(i in seq(along = x)) { + for(i in seq(along.with = x)) { if(class(x[[i]])[1] == "rfe" && x[[i]]$control$returnResamp == "all"){ warning(paste0("'", modelNames[i], "' did not have 'returnResamp=\"final\"; the optimal subset is used")) } @@ -170,7 +170,7 @@ resamples.default <- function(x, modelNames = names(x), ...) 
{ rs_values <- lapply(rs_values, function(x, n) x[,n,drop = FALSE], n = c(pNames, "Resample")) - for(mod in seq(along = modelNames)) { + for(mod in seq(along.with = modelNames)) { names(rs_values[[mod]])[names(rs_values[[mod]]) %in% pNames] <- paste(modelNames[mod], names(rs_values[[mod]])[names(rs_values[[mod]]) %in% pNames], sep = "~") out <- if(mod == 1) rs_values[[mod]] else merge(out, rs_values[[mod]]) @@ -209,7 +209,7 @@ sort.resamples <- function(x, decreasing = FALSE, metric = x$metric[1], FUN = me summary.resamples <- function(object, metric = object$metrics, ...){ vals <- object$values[, names(object$values) != "Resample", drop = FALSE] out <- vector(mode = "list", length = length(metric)) - for(i in seq(along = metric)) { + for(i in seq(along.with = metric)) { tmpData <- vals[, grep(paste("~", metric[i], sep = ""), names(vals), fixed = TRUE), drop = FALSE] out[[i]] <- do.call("rbind", lapply(tmpData, function(x) summary(x)[1:6])) @@ -418,14 +418,14 @@ plot.prcomp.resamples <- function(x, what = "scree", dims = max(2, ncol(x$rotati scree = { barchart(x$sdev ~ paste("PC", - gsub(" ", "0", format(seq(along = x$sdev))), + gsub(" ", "0", format(seq(along.with = x$sdev))), sep = ""), ylab = "Standard Deviation", ...) }, cumulative = { barchart(cumsum(x$sdev^2)/sum(x$sdev^2) ~ paste("PC", - gsub(" ", "0", format(seq(along = x$sdev))), + gsub(" ", "0", format(seq(along.with = x$sdev))), sep = ""), ylab = "Culmulative Percent of Variance", ...) }, @@ -530,7 +530,7 @@ print.summary.resamples <- function(x, ...) cat("\n") - for(i in seq(along = x$statistics)) + for(i in seq(along.with = x$statistics)) { cat(names(x$statistics)[i], "\n") print(x$statistics[[i]]) @@ -700,7 +700,7 @@ xyplot.resamples <- function (x, data = NULL, what = "scatter", models = NULL, m lx <- as.numeric(lx[subscripts]) ux <- as.numeric(ux[subscripts]) gps <- unique(groups) - for(i in seq(along = gps)) + for(i in seq(along.with = gps)) { panel.arrows(lx[groups == gps[i]], y[groups == gps[i]], @@ -1122,7 +1122,7 @@ diff.resamples <- function(x, if(adjustment == "bonferroni") confLevel <- 1 - ((1 - confLevel)/ncomp) allStats <- allDif - for(h in seq(along = metric)) + for(h in seq(along.with = metric)) { index <- 0 dif <- matrix(NA, @@ -1131,9 +1131,9 @@ diff.resamples <- function(x, stat <- vector(mode = "list", length = choose(length(models), 2)) colnames(dif) <- paste("tmp", 1:ncol(dif), sep = "") - for(i in seq(along = models)) + for(i in seq(along.with = models)) { - for(j in seq(along = models)) + for(j in seq(along.with = models)) { if(i < j) { @@ -1228,13 +1228,13 @@ summary.diff.resamples <- function(object, digits = max(3, getOption("digits") - all <- vector(mode = "list", length = length(object$metric)) names(all) <- object$metric - for(h in seq(along = object$metric)) + for(h in seq(along.with = object$metric)) { pvals <- matrix(NA, nrow = length(object$models), ncol = length(object$models)) meanDiff <- pvals index <- 0 - for(i in seq(along = object$models)) { - for(j in seq(along = object$models)) { + for(i in seq(along.with = object$models)) { + for(j in seq(along.with = object$models)) { if(i < j) { index <- index + 1 meanDiff[i, j] <- object$statistics[[h]][index][[1]]$estimate @@ -1243,8 +1243,8 @@ summary.diff.resamples <- function(object, digits = max(3, getOption("digits") - } index <- 0 - for(i in seq(along = object$models)) { - for(j in seq(along = object$models)) { + for(i in seq(along.with = object$models)) { + for(j in seq(along.with = object$models)) { if(i < j) { index <- index + 1 pvals[j, 
i] <- object$statistics[[h]][index][[1]]$p.value @@ -1284,13 +1284,13 @@ levelplot.diff.resamples <- function(x, data = NULL, metric = x$metric[1], what all <- vector(mode = "list", length = length(x$metric)) names(all) <- x$metric - for(h in seq(along = x$metric)) + for(h in seq(along.with = x$metric)) { temp <- matrix(NA, nrow = length(x$models), ncol = length( x$models)) index <- 0 - for(i in seq(along = x$models)) + for(i in seq(along.with = x$models)) { - for(j in seq(along = x$models)) + for(j in seq(along.with = x$models)) { if(i < j) @@ -1342,7 +1342,7 @@ print.summary.diff.resamples <- function(x, ...) "Lower diagonal: p-value for H0: difference = 0\n\n", sep = "") - for(i in seq(along = x$table)) + for(i in seq(along.with = x$table)) { cat(names(x$table)[i], "\n") print(x$table[[i]], quote = FALSE) @@ -1419,9 +1419,9 @@ dotplot.diff.resamples <- function(x, data = NULL, metric = x$metric[1], ...) plotData <- as.data.frame(matrix(NA, ncol = 3, nrow = ncol(x$difs[[metric]])), stringsAsFactors = TRUE) ## Get point and interval estimates on the differences index <- 0 - for(i in seq(along = x$models)) + for(i in seq(along.with = x$models)) { - for(j in seq(along = x$models)) + for(j in seq(along.with = x$models)) { if(i < j) @@ -1462,7 +1462,7 @@ dotplot.diff.resamples <- function(x, data = NULL, metric = x$metric[1], ...) col = plotTheme$reference.line$col[1], lty = plotTheme$reference.line$lty[1], lwd = plotTheme$reference.line$lwd[1]) - for(i in seq(along = upper$mod)) + for(i in seq(along.with = upper$mod)) { panel.segments(upper$x[i], upper$mod[i], lower$x[i], lower$mod[i], col = plotTheme$plot.line$col[1], diff --git a/pkg/caret/R/rfe.R b/pkg/caret/R/rfe.R index 513031b6..1427b8de 100644 --- a/pkg/caret/R/rfe.R +++ b/pkg/caret/R/rfe.R @@ -196,7 +196,7 @@ rfe <- function (x, ...) UseMethod("rfe") if(is.null(rfeControl$indexOut)){ rfeControl$indexOut <- lapply(rfeControl$index, function(training, allSamples) allSamples[-unique(training)], - allSamples = seq(along = y)) + allSamples = seq(along.with = y)) names(rfeControl$indexOut) <- prettySeq(rfeControl$indexOut) } @@ -209,7 +209,7 @@ rfe <- function (x, ...) 
UseMethod("rfe") if(is.factor(y)) { - for(i in seq(along = classLevels)) testOutput[, classLevels[i]] <- runif(nrow(testOutput)) + for(i in seq(along.with = classLevels)) testOutput[, classLevels[i]] <- runif(nrow(testOutput)) } test <- rfeControl$functions$summary(testOutput, lev = classLevels) @@ -412,7 +412,7 @@ rfeIter <- function(x, y, sizeText <- format(sizeValues) finalVariables <- vector(length(sizeValues), mode = "list") - for(k in seq(along = sizeValues)) + for(k in seq(along.with = sizeValues)) { if(!any(is.na(seeds))) set.seed(seeds[k]) if(rfeControl$verbose) @@ -1448,7 +1448,7 @@ rfe_rec <- function(x, y, test_x, test_y, perf_dat, sizeText <- format(sizeValues) finalVariables <- vector(length(sizeValues), mode = "list") - for (k in seq(along = sizeValues)) { + for (k in seq(along.with = sizeValues)) { if (!any(is.na(seeds))) set.seed(seeds[k]) @@ -1640,7 +1640,7 @@ rfe_rec <- function(x, y, test_x, test_y, perf_dat, rfeControl$indexOut <- lapply(rfeControl$index, function(training, allSamples) allSamples[-unique(training)], - allSamples = seq(along = y_dat)) + allSamples = seq(along.with = y_dat)) names(rfeControl$indexOut) <- prettySeq(rfeControl$indexOut) } @@ -1656,7 +1656,7 @@ rfe_rec <- function(x, y, test_x, test_y, perf_dat, testOutput <- data.frame(pred = sample(y_dat, min(10, length(y_dat))), obs = sample(y_dat, min(10, length(y_dat)))) if (is.factor(y_dat)) { - for (i in seq(along = classLevels)) + for (i in seq(along.with = classLevels)) testOutput[, classLevels[i]] <- runif(nrow(testOutput)) } if(!is.null(perf_data)) @@ -1865,7 +1865,7 @@ rfe_rec_workflow <- function(rec, data, sizes, ctrl, lev, ...) { `%op%` <- getOper(ctrl$allowParallel && foreach::getDoParWorkers() > 1) result <- foreach( - iter = seq(along = resampleIndex), + iter = seq(along.with = resampleIndex), .combine = "c", .verbose = FALSE, .errorhandling = "stop", @@ -1974,7 +1974,7 @@ rfe_rec_workflow <- function(rec, data, sizes, ctrl, lev, ...) { ## So, we need to find out how many set of predictions there are: nReps <- length(table(rfeResults$pred$Variables)) rfeResults$pred$rowIndex <- - rep(seq(along = y)[unique(holdoutIndex)], nReps) + rep(seq(along.with = y)[unique(holdoutIndex)], nReps) } if (is.factor(y) && length(lev) <= 50) { @@ -2029,7 +2029,7 @@ rfe_rec_workflow <- function(rec, data, sizes, ctrl, lev, ...) { if (ctrl$method %in% c("boot632")) { externPerf <- merge(externPerf, apparent) - for (p in seq(along = perfNames)) { + for (p in seq(along.with = perfNames)) { const <- 1 - exp(-1) externPerf[, perfNames[p]] <- (const * externPerf[, perfNames[p]]) + ((1 - const) * externPerf[, paste(perfNames[p], "Apparent", sep = "")]) @@ -2048,7 +2048,7 @@ rfe_rec_loo <- function(rec, data, sizes, ctrl, lev, ...) { `%op%` <- getOper(ctrl$allowParallel && getDoParWorkers() > 1) result <- foreach( - iter = seq(along = resampleIndex), + iter = seq(along.with = resampleIndex), .combine = "c", .verbose = FALSE, .errorhandling = "stop", diff --git a/pkg/caret/R/safs.R b/pkg/caret/R/safs.R index e13764c4..ed63c348 100644 --- a/pkg/caret/R/safs.R +++ b/pkg/caret/R/safs.R @@ -533,7 +533,7 @@ safs <- function (x, ...) UseMethod("safs") if(is.null(safsControl$indexOut)){ safsControl$indexOut <- lapply(safsControl$index, function(training, allSamples) allSamples[-unique(training)], - allSamples = seq(along = y)) + allSamples = seq(along.with = y)) names(safsControl$indexOut) <- getFromNamespace("prettySeq", "caret")(safsControl$indexOut) } @@ -551,7 +551,7 @@ safs <- function (x, ...) 
UseMethod("safs") obs = sample(y, min(10, length(y)))) if(is.factor(y)) - for(i in seq(along = classLevels)) testOutput[, classLevels[i]] <- runif(nrow(testOutput)) + for(i in seq(along.with = classLevels)) testOutput[, classLevels[i]] <- runif(nrow(testOutput)) test <- safsControl$functions$fitness_extern(testOutput, lev = classLevels) perfNames <- names(test) @@ -571,7 +571,7 @@ safs <- function (x, ...) UseMethod("safs") `%op%` <- getOper(safsControl$allowParallel && getDoParWorkers() > 1) # sa_resampled <- external <- vector(mode = "list", length = length(safsControl$index)) - result <- foreach(i = seq(along = safsControl$index), .combine = "c", .verbose = FALSE, .errorhandling = "stop") %op% { + result <- foreach(i = seq(along.with = safsControl$index), .combine = "c", .verbose = FALSE, .errorhandling = "stop") %op% { sa_select(x[safsControl$index[[i]],,drop=FALSE], y[safsControl$index[[i]]], funcs = safsControl$functions, @@ -618,9 +618,9 @@ safs <- function (x, ...) UseMethod("safs") in_holdout <- createDataPartition(y, p = safsControl$holdout, list = FALSE) - in_model <- seq(along = y)[-unique(in_holdout)] + in_model <- seq(along.with = y)[-unique(in_holdout)] } else { - in_model <- seq(along = y) + in_model <- seq(along.with = y) in_holdout <- NULL } final_sa <- sa_select(x[in_model,,drop=FALSE], @@ -812,7 +812,7 @@ safs_initial <- function (vars, prob = .20, ...) { #' @export safs_perturb <- function(x, vars, number = floor(length(x)*.01) + 1) { bin <- index2vec(x, vars) - change <- sample(seq(along = bin), size = number) + change <- sample(seq(along.with = bin), size = number) bin[change] <- ifelse(bin[change] == 1, 0, 1) sort(which(bin == 1)) } @@ -1382,7 +1382,7 @@ update.safs <- function(object, iter, x, y, ...) { safsControl$indexOut <- lapply(safsControl$index, function(training, allSamples) allSamples[-unique(training)], - allSamples = seq(along = y)) + allSamples = seq(along.with = y)) names(safsControl$indexOut) <- getFromNamespace("prettySeq", "caret")(safsControl$indexOut) } @@ -1401,7 +1401,7 @@ update.safs <- function(object, iter, x, y, ...) { obs = sample(y, min(10, length(y)))) if(is.factor(y)) - for(i in seq(along = classLevels)) testOutput[, classLevels[i]] <- runif(nrow(testOutput)) + for(i in seq(along.with = classLevels)) testOutput[, classLevels[i]] <- runif(nrow(testOutput)) if(!is.null(perf_data)) testOutput <- cbind( testOutput, @@ -1427,7 +1427,7 @@ update.safs <- function(object, iter, x, y, ...) { `%op%` <- getOper(safsControl$allowParallel && getDoParWorkers() > 1) result <- foreach( - i = seq(along = safsControl$index), + i = seq(along.with = safsControl$index), .combine = "c", .verbose = FALSE, .errorhandling = "stop", @@ -1510,9 +1510,9 @@ update.safs <- function(object, iter, x, y, ...) 
{ in_holdout <- createDataPartition(y, p = safsControl$holdout, list = FALSE) - in_model <- seq(along = y)[-unique(in_holdout)] + in_model <- seq(along.with = y)[-unique(in_holdout)] } else { - in_model <- seq(along = y) + in_model <- seq(along.with = y) in_holdout <- NULL } final_sa <- sa_select( diff --git a/pkg/caret/R/sampling.R b/pkg/caret/R/sampling.R index 8cd9cbbf..c0290a26 100644 --- a/pkg/caret/R/sampling.R +++ b/pkg/caret/R/sampling.R @@ -48,7 +48,7 @@ downSample <- function(x, y, list = FALSE, yname = "Class") { x <- ddply(x, .(y), function(dat, n) - dat[sample(seq(along = dat$.outcome), n), , drop = FALSE], + dat[sample(seq(along.with = dat$.outcome), n), , drop = FALSE], n = minClass) y <- x$.outcome x <- x[, !(colnames(x) %in% c("y", ".outcome")), drop = FALSE] diff --git a/pkg/caret/R/selectByFilter.R b/pkg/caret/R/selectByFilter.R index 02e62826..1d0a99b7 100644 --- a/pkg/caret/R/selectByFilter.R +++ b/pkg/caret/R/selectByFilter.R @@ -196,7 +196,7 @@ sbf <- function (x, ...) UseMethod("sbf") if(is.null(sbfControl$indexOut)){ sbfControl$indexOut <- lapply(sbfControl$index, function(training, allSamples) allSamples[-unique(training)], - allSamples = seq(along = y)) + allSamples = seq(along.with = y)) names(sbfControl$indexOut) <- prettySeq(sbfControl$indexOut) } ## check summary function and metric @@ -204,7 +204,7 @@ sbf <- function (x, ...) UseMethod("sbf") obs = sample(y, min(10, length(y)))) if(is.factor(y)) - for(i in seq(along = classLevels)) + for(i in seq(along.with = classLevels)) testOutput[, classLevels[i]] <- runif(nrow(testOutput)) @@ -389,7 +389,7 @@ sbf.formula <- function (form, data, ..., subset, na.action, contrasts = NULL) { if(is.null(sbfControl$indexOut)){ sbfControl$indexOut <- lapply(sbfControl$index, function(training, allSamples) allSamples[-unique(training)], - allSamples = seq(along = y)) + allSamples = seq(along.with = y)) names(sbfControl$indexOut) <- prettySeq(sbfControl$indexOut) } ## check summary function and metric @@ -397,7 +397,7 @@ sbf.formula <- function (form, data, ..., subset, na.action, contrasts = NULL) { obs = sample(y, min(10, length(y)))) if(is.factor(y)) - for(i in seq(along = classLevels)) + for(i in seq(along.with = classLevels)) testOutput[, classLevels[i]] <- runif(nrow(testOutput)) if(!is.null(perf_data)) testOutput <- cbind( @@ -522,7 +522,7 @@ sbf_rec <- function(rec, data, ctrl, lev, ...) { `%op%` <- getOper(ctrl$allowParallel && getDoParWorkers() > 1) result <- foreach( - iter = seq(along = resampleIndex), + iter = seq(along.with = resampleIndex), .combine = "c", .verbose = FALSE, .errorhandling = "stop", @@ -612,7 +612,7 @@ sbf_rec <- function(rec, data, ctrl, lev, ...) { const <- 1-exp(-1) - for(p in seq(along = perfNames)) + for(p in seq(along.with = perfNames)) performance[perfNames[p]] <- (const * performance[perfNames[p]]) + ((1-const) * apparent[perfNames[p]]) } @@ -637,7 +637,7 @@ sbf_loo_rec <- function(rec, data, ctrl, lev, ...) 
 {
   `%op%` <- getOper(ctrl$allowParallel && getDoParWorkers() > 1)
   result <- foreach(
-    iter = seq(along = resampleIndex),
+    iter = seq(along.with = resampleIndex),
     .combine = "c",
     .verbose = FALSE,
     .errorhandling = "stop",
diff --git a/pkg/caret/R/sensitivity.R b/pkg/caret/R/sensitivity.R
index 1205dce3..7c68d383 100644
--- a/pkg/caret/R/sensitivity.R
+++ b/pkg/caret/R/sensitivity.R
@@ -90,7 +90,7 @@
 #'
 #' prev <- seq(0.001, .99, length = 20)
 #' npvVals <- ppvVals <- prev * NA
-#' for(i in seq(along = prev))
+#' for(i in seq(along.with = prev))
 #'   {
 #'     ppvVals[i] <- posPredValue(pred, truth, prevalence = prev[i])
 #'     npvVals[i] <- negPredValue(pred, truth, prevalence = prev[i])
diff --git a/pkg/caret/R/sortImp.R b/pkg/caret/R/sortImp.R
index 268bfe03..36abdeb1 100644
--- a/pkg/caret/R/sortImp.R
+++ b/pkg/caret/R/sortImp.R
@@ -23,7 +23,7 @@ sortImp <- function(object, top)
   if(length(tiedRanks) > 0)
     {
-      for(i in seq(along = tiedRanks))
+      for(i in seq(along.with = tiedRanks))
         {
           tmp <- featureRank[featureRank == tiedRanks[i]]
           featureRank[featureRank == tiedRanks[i]] <- tmp + runif(length(tmp), min = 0.001, max = 0.999)
diff --git a/pkg/caret/R/train.default.R b/pkg/caret/R/train.default.R
index ad2215ff..7c3ca134 100644
--- a/pkg/caret/R/train.default.R
+++ b/pkg/caret/R/train.default.R
@@ -345,7 +345,7 @@ train.default <- function(x, y,
     stop(paste("Model", method, "is not in caret's built-in library"), call. = FALSE)
   }
   checkInstall(models$library)
-  for(i in seq(along = models$library)) do.call("requireNamespaceQuietStop", list(package = models$library[i]))
+  for(i in seq(along.with = models$library)) do.call("requireNamespaceQuietStop", list(package = models$library[i]))
   if(any(names(models) == "check") && is.function(models$check)) {
     software_check <- models$check(models$library)
   }
@@ -585,7 +585,7 @@ train.default <- function(x, y,
   ##   tmp <- vector(mode = "list", length = nrow(param) + 1)
   ##   tmp[[1]] <- out
   ##
-  ##   for(j in seq(along = param$.n.trees))
+  ##   for(j in seq(along.with = param$.n.trees))
   ##     {
   ##       tmp[[j]] <- predict(modelFit,
   ##                           newdata,
@@ -816,7 +816,7 @@ train.default <- function(x, y,
   ## Reorder rows of performance
   orderList <- list()
-  for(i in seq(along = paramNames)) orderList[[i]] <- performance[,paramNames[i]]
+  for(i in seq(along.with = paramNames)) orderList[[i]] <- performance[,paramNames[i]]

   performance <- performance[do.call("order", orderList),]

@@ -831,7 +831,7 @@ train.default <- function(x, y,

   ## Make the final model based on the tuning results

-  indexFinal <- if(is.null(trControl$indexFinal)) seq(along = y) else trControl$indexFinal
+  indexFinal <- if(is.null(trControl$indexFinal)) seq(along.with = y) else trControl$indexFinal
   if(!(length(trControl$seeds) == 1 && is.na(trControl$seeds))) set.seed(trControl$seeds[[length(trControl$seeds)]][1])

   startFinalTime <- proc.time()
@@ -944,6 +944,7 @@ train.formula <- function (form, data, ..., weights, subset, na.action = na.fail
   # do we need the double colon here?
   m[[1]] <- quote(stats::model.frame)
+  names(m)[names(m) == "form"] <- "formula" # avoid warning under warnPartialMatchArgs=TRUE
   m <- eval.parent(m)
   if(nrow(m) < 1) stop("Every row has at least one missing value were found", call. = FALSE)
   Terms <- attr(m, "terms")
@@ -1012,7 +1013,7 @@ train.recipe <- function(x,
     stop(paste("Model", method, "is not in caret's built-in library"), call. = FALSE)
   }
   checkInstall(models$library)
-  for(i in seq(along = models$library))
+  for(i in seq(along.with = models$library))
     do.call("requireNamespace", list(package = models$library[i]))
   if(any(names(models) == "check") && is.function(models$check)) {
     software_check <- models$check(models$library)
@@ -1410,7 +1411,7 @@ train.recipe <- function(x,
   ## Reorder rows of performance
   orderList <- list()
-  for(i in seq(along = paramNames)) orderList[[i]] <- performance[,paramNames[i]]
+  for(i in seq(along.with = paramNames)) orderList[[i]] <- performance[,paramNames[i]]

   performance <- performance[do.call("order", orderList),]

@@ -1425,7 +1426,7 @@ train.recipe <- function(x,
   ## Make the final model based on the tuning results

   indexFinal <- if(is.null(trControl$indexFinal))
-    seq(along = data[[y_orig_val]]) else trControl$indexFinal
+    seq(along.with = data[[y_orig_val]]) else trControl$indexFinal

   if(!(length(trControl$seeds) == 1 && is.na(trControl$seeds)))
     set.seed(trControl$seeds[[length(trControl$seeds)]][1])
diff --git a/pkg/caret/R/train_recipes.R b/pkg/caret/R/train_recipes.R
index f192539b..201877aa 100644
--- a/pkg/caret/R/train_recipes.R
+++ b/pkg/caret/R/train_recipes.R
@@ -211,7 +211,7 @@ loo_train_rec <- function(rec, dat, info, method,
   if(!is.null(method$library)) pkgs <- c(pkgs, method$library)

-  result <- foreach(iter = seq(along = ctrl$index),
+  result <- foreach(iter = seq(along.with = ctrl$index),
                     .combine = "rbind",
                     .verbose = FALSE,
                     .packages = pkgs,
@@ -309,7 +309,7 @@ loo_train_rec <- function(rec, dat, info, method,
      if(testing) print(head(predicted))
      ## same for the class probabilities
      if(ctrl$classProbs) {
-       for(k in seq(along = predicted)) predicted[[k]] <-
+       for(k in seq(along.with = predicted)) predicted[[k]] <-
         cbind(predicted[[k]], probValues[[k]])
      }
      predicted <- do.call("rbind", predicted)
@@ -409,7 +409,7 @@ train_rec <- function(rec, dat, info, method, ctrl, lev, testing = FALSE, ...) {
   export <- c()

-  result <- foreach(iter = seq(along = resampleIndex), .combine = "c", .packages = pkgs, .export = export) %:%
+  result <- foreach(iter = seq(along.with = resampleIndex), .combine = "c", .packages = pkgs, .export = export) %:%
     foreach(parm = 1L:nrow(info$loop), .combine = "c", .packages = pkgs, .export = export) %op% {
       if(!(length(ctrl$seeds) == 1L && is.na(ctrl$seeds)))
@@ -517,7 +517,7 @@ train_rec <- function(rec, dat, info, method, ctrl, lev, testing = FALSE, ...) {
      if(keep_pred) {
        tmpPred <- predicted
-       for(modIndex in seq(along = tmpPred)) {
+       for(modIndex in seq(along.with = tmpPred)) {
         tmpPred[[modIndex]] <- merge(tmpPred[[modIndex]],
                                      allParam[modIndex,,drop = FALSE],
                                      all = TRUE)
@@ -537,7 +537,7 @@ train_rec <- function(rec, dat, info, method, ctrl, lev, testing = FALSE, ...) {
      if(length(lev) > 1 && length(lev) <= 50) {
        cells <- lapply(predicted, function(x) flatTable(x$pred, x$obs))
-       for(ind in seq(along = cells))
+       for(ind in seq(along.with = cells))
         thisResample[[ind]] <- c(thisResample[[ind]], cells[[ind]])
      }
      thisResample <- do.call("rbind", thisResample)
@@ -663,12 +663,12 @@ train_adapt_rec <- function(rec, dat, info, method, ctrl, lev, metric, maximize,
   pkgs <- c("methods", "caret")
   if(!is.null(method$library)) pkgs <- c(pkgs, method$library)

-  init_index <- seq(along = resampleIndex)[1:(ctrl$adaptive$min-1)]
-  extra_index <- seq(along = resampleIndex)[-(1:(ctrl$adaptive$min-1))]
+  init_index <- seq(along.with = resampleIndex)[1:(ctrl$adaptive$min-1)]
+  extra_index <- seq(along.with = resampleIndex)[-(1:(ctrl$adaptive$min-1))]

   keep_pred <- isTRUE(ctrl$savePredictions) || ctrl$savePredictions %in% c("all", "final")

-  init_result <- foreach(iter = seq(along = init_index),
+  init_result <- foreach(iter = seq(along.with = init_index),
                          .combine = "c",
                          .verbose = FALSE,
                          .packages = pkgs,
@@ -776,13 +776,13 @@ train_adapt_rec <- function(rec, dat, info, method, ctrl, lev, metric, maximize,
      ## same for the class probabilities
      if(ctrl$classProbs) {
-       for(k in seq(along = predicted))
+       for(k in seq(along.with = predicted))
         predicted[[k]] <- cbind(predicted[[k]], probValues[[k]])
      }
      if(keep_pred) {
        tmpPred <- predicted
-       for(modIndex in seq(along = tmpPred)) {
+       for(modIndex in seq(along.with = tmpPred)) {
         tmpPred[[modIndex]]$rowIndex <- holdoutIndex
         tmpPred[[modIndex]] <- merge(tmpPred[[modIndex]],
                                      allParam[modIndex,,drop = FALSE],
@@ -802,7 +802,7 @@ train_adapt_rec <- function(rec, dat, info, method, ctrl, lev, metric, maximize,
      if(length(lev) > 1 && length(lev) <= 50) {
        cells <- lapply(predicted, function(x) flatTable(x$pred, x$obs))
-       for(ind in seq(along = cells)) thisResample[[ind]] <- c(thisResample[[ind]], cells[[ind]])
+       for(ind in seq(along.with = cells)) thisResample[[ind]] <- c(thisResample[[ind]], cells[[ind]])
      }
      thisResample <- do.call("rbind", thisResample)
      thisResample <- cbind(allParam, thisResample)
@@ -955,13 +955,13 @@ train_adapt_rec <- function(rec, dat, info, method, ctrl, lev, metric, maximize,
      ## same for the class probabilities
      if(ctrl$classProbs) {
-       for(k in seq(along = predicted))
+       for(k in seq(along.with = predicted))
         predicted[[k]] <- cbind(predicted[[k]], probValues[[k]])
      }
      if(keep_pred) {
        tmpPred <- predicted
-       for(modIndex in seq(along = tmpPred)) {
+       for(modIndex in seq(along.with = tmpPred)) {
         tmpPred[[modIndex]]$rowIndex <- holdoutIndex
         tmpPred[[modIndex]] <- merge(tmpPred[[modIndex]],
                                      allParam[modIndex,,drop = FALSE],
@@ -981,7 +981,7 @@ train_adapt_rec <- function(rec, dat, info, method, ctrl, lev, metric, maximize,
      if(length(lev) > 1 && length(lev) <= 50) {
        cells <- lapply(predicted, function(x) flatTable(x$pred, x$obs))
-       for(ind in seq(along = cells))
+       for(ind in seq(along.with = cells))
         thisResample[[ind]] <- c(thisResample[[ind]], cells[[ind]])
      }
      thisResample <- do.call("rbind", thisResample)
@@ -1093,7 +1093,7 @@ train_adapt_rec <- function(rec, dat, info, method, ctrl, lev, metric, maximize,
   printed <- format(new_info$loop, digits = 4)
   colnames(printed) <- gsub("^\\.", "", colnames(printed))

-  final_index <- seq(along = resampleIndex)[(last_iter+1):length(ctrl$index)]
+  final_index <- seq(along.with = resampleIndex)[(last_iter+1):length(ctrl$index)]
   final_result <- foreach(iter = final_index,
                           .combine = "c",
                           .verbose = FALSE,
@@ -1200,13 +1200,13 @@ train_adapt_rec <- function(rec, dat, info, method, ctrl, lev, metric, maximize,
      ## same for the class probabilities
      if(ctrl$classProbs) {
-       for(k in seq(along = predicted))
+       for(k in seq(along.with = predicted))
         predicted[[k]] <- cbind(predicted[[k]], probValues[[k]])
      }
      if(keep_pred) {
        tmpPred <- predicted
-       for(modIndex in seq(along = tmpPred)) {
+       for(modIndex in seq(along.with = tmpPred)) {
         tmpPred[[modIndex]]$rowIndex <- holdoutIndex
         tmpPred[[modIndex]] <- merge(tmpPred[[modIndex]],
                                      allParam[modIndex,,drop = FALSE],
@@ -1226,7 +1226,7 @@ train_adapt_rec <- function(rec, dat, info, method, ctrl, lev, metric, maximize,
      if(length(lev) > 1 && length(lev) <= 50) {
        cells <- lapply(predicted, function(x) flatTable(x$pred, x$obs))
-       for(ind in seq(along = cells))
+       for(ind in seq(along.with = cells))
         thisResample[[ind]] <- c(thisResample[[ind]], cells[[ind]])
      }
      thisResample <- do.call("rbind", thisResample)
diff --git a/pkg/caret/R/twoClassSim.R b/pkg/caret/R/twoClassSim.R
index b43cb304..a78a1bcf 100644
--- a/pkg/caret/R/twoClassSim.R
+++ b/pkg/caret/R/twoClassSim.R
@@ -202,9 +202,9 @@ twoClassSim <- function(n = 100,
     2*sin(pi*tmpData$Nonlinear2* tmpData$Nonlinear3)

   if(linearVars > 0) {
-    lin <- seq(10, 1, length = linearVars)/4
+    lin <- seq(10, 1, length.out = linearVars)/4
     lin <- lin * rep(c(-1, 1), floor(linearVars)+1)[1:linearVars]
-    for(i in seq(along = lin)) lp <- lp + tmpData[, i+3]*lin[i]
+    for(i in seq(along.with = lin)) lp <- lp + tmpData[, i+3]*lin[i]
   }

   if(ordinal){
diff --git a/pkg/caret/R/varImp.R b/pkg/caret/R/varImp.R
index 052747ef..da549a88 100644
--- a/pkg/caret/R/varImp.R
+++ b/pkg/caret/R/varImp.R
@@ -280,7 +280,7 @@ GarsonWeights_FCNN4R <- function (object, xnames = NULL, ynames = NULL) {
 varImpDependencies <- function(libName){
   code <- getModelInfo(libName, regex = FALSE)[[1]]
   checkInstall(code$library)
-  for(i in seq(along = code$library))
+  for(i in seq(along.with = code$library))
     do.call("requireNamespaceQuietStop", list(package = code$library[i]))
   return(code)
 }
diff --git a/pkg/caret/R/varImp.train.R b/pkg/caret/R/varImp.train.R
index 5db6db2b..70eea0de 100644
--- a/pkg/caret/R/varImp.train.R
+++ b/pkg/caret/R/varImp.train.R
@@ -6,7 +6,7 @@
   if(is.null(code$varImp)) useModel <- FALSE
   if(useModel) {
     checkInstall(code$library)
-    for(i in seq(along = code$library))
+    for(i in seq(along.with = code$library))
       do.call("requireNamespaceQuietStop", list(package = code$library[i]))
     imp <- code$varImp(object$finalModel, ...)
     modelName <- object$method
diff --git a/pkg/caret/R/workflows.R b/pkg/caret/R/workflows.R
index 059a096a..80bc3665 100644
--- a/pkg/caret/R/workflows.R
+++ b/pkg/caret/R/workflows.R
@@ -77,7 +77,7 @@ nominalTrainWorkflow <- function(x, y, wts, info, method, ppOpts, ctrl, lev, tes
   if(!is.null(method$library)) pkgs <- c(pkgs, method$library)
   export <- c()

-  result <- foreach(iter = seq(along = resampleIndex), .combine = "c", .verbose = FALSE, .export = export, .packages = "caret") %:%
+  result <- foreach(iter = seq(along.with = resampleIndex), .combine = "c", .verbose = FALSE, .export = export, .packages = "caret") %:%
     foreach(parm = 1L:nrow(info$loop), .combine = "c", .verbose = FALSE, .export = export , .packages = "caret") %op%
   {
     if(!(length(ctrl$seeds) == 1 && is.na(ctrl$seeds))) set.seed(ctrl$seeds[[iter]][parm])
@@ -190,7 +190,7 @@ nominalTrainWorkflow <- function(x, y, wts, info, method, ppOpts, ctrl, lev, tes
      if(keep_pred) {
        tmpPred <- predicted
-       for(modIndex in seq(along = tmpPred)) {
+       for(modIndex in seq(along.with = tmpPred)) {
         tmpPred[[modIndex]] <- merge(tmpPred[[modIndex]],
                                      allParam[modIndex,,drop = FALSE],
                                      all = TRUE)
@@ -209,7 +209,7 @@ nominalTrainWorkflow <- function(x, y, wts, info, method, ppOpts, ctrl, lev, tes
      if(length(lev) > 1 && length(lev) <= 50) {
        cells <- lapply(predicted, function(x) flatTable(x$pred, x$obs))
-       for(ind in seq(along = cells)) thisResample[[ind]] <- c(thisResample[[ind]], cells[[ind]])
+       for(ind in seq(along.with = cells)) thisResample[[ind]] <- c(thisResample[[ind]], cells[[ind]])
      }
      thisResample <- do.call("rbind", thisResample)
      thisResample <- cbind(allParam, thisResample)
@@ -347,7 +347,7 @@ looTrainWorkflow <- function(x, y, wts, info, method, ppOpts, ctrl, lev, testing
   pkgs <- c("methods", "caret")
   if(!is.null(method$library)) pkgs <- c(pkgs, method$library)

-  result <- foreach(iter = seq(along = ctrl$index), .combine = "rbind", .verbose = FALSE, .errorhandling = "stop", .packages = "caret") %:%
+  result <- foreach(iter = seq(along.with = ctrl$index), .combine = "rbind", .verbose = FALSE, .errorhandling = "stop", .packages = "caret") %:%
     foreach(parm = 1:nrow(info$loop), .combine = "rbind", .verbose = FALSE, .errorhandling = "stop", .packages = "caret") %op%
   {
     if(!(length(ctrl$seeds) == 1 && is.na(ctrl$seeds))) set.seed(ctrl$seeds[[iter]][parm])
@@ -432,12 +432,12 @@ looTrainWorkflow <- function(x, y, wts, info, method, ppOpts, ctrl, lev, testing
                                 y = y[holdoutIndex],
                                 wts = wts[holdoutIndex],
                                 lv = lev,
-                                rows = seq(along = y)[holdoutIndex])
+                                rows = seq(along.with = y)[holdoutIndex])
      if(testing) print(head(predicted))

      ## same for the class probabilities
      if(ctrl$classProbs)
-       for(k in seq(along = predicted))
+       for(k in seq(along.with = predicted))
         predicted[[k]] <- cbind(predicted[[k]], probValues[[k]])
      predicted <- do.call("rbind", predicted)
      allParam <- expandParameters(info$loop[parm,,drop = FALSE], submod)
@@ -451,7 +451,7 @@ looTrainWorkflow <- function(x, y, wts, info, method, ppOpts, ctrl, lev, testing
                             stringsAsFactors = FALSE)
      if(!is.null(wts)) predicted$weights <- wts[holdoutIndex]
      if(ctrl$classProbs) predicted <- cbind(predicted, probValues)
-     predicted$rowIndex <- seq(along = y)[holdoutIndex]
+     predicted$rowIndex <- seq(along.with = y)[holdoutIndex]
      predicted <- cbind(predicted, info$loop[parm,,drop = FALSE])
    }
@@ -525,7 +525,7 @@ nominalSbfWorkflow <- function(x, y, ppOpts, ctrl, lev, ...)
 {
   `%op%` <- getOper(ctrl$allowParallel && getDoParWorkers() > 1)
   result <- foreach(
-    iter = seq(along = resampleIndex),
+    iter = seq(along.with = resampleIndex),
     .combine = "c",
     .verbose = FALSE,
     .errorhandling = "stop",
@@ -552,7 +552,7 @@ nominalSbfWorkflow <- function(x, y, ppOpts, ctrl, lev, ...)
   {
     if(ctrl$saveDetails)
     {
       tmpPred <- sbfResults$pred
       tmpPred$Resample <- names(resampleIndex)[iter]
-      tmpPred$rowIndex <- seq(along = y)[unique(holdoutIndex)]
+      tmpPred$rowIndex <- seq(along.with = y)[unique(holdoutIndex)]
     } else tmpPred <- NULL
     resamples <- ctrl$functions$summary(sbfResults$pred, lev = lev)
     if(is.factor(y) && length(lev) <= 50)
@@ -582,7 +582,7 @@ nominalSbfWorkflow <- function(x, y, ppOpts, ctrl, lev, ...)
   {
     const <- 1-exp(-1)
-    for(p in seq(along = perfNames))
+    for(p in seq(along.with = perfNames))
       performance[perfNames[p]] <- (const * performance[perfNames[p]]) + ((1-const) * apparent[perfNames[p]])
   }
@@ -609,7 +609,7 @@ looSbfWorkflow <- function(x, y, ppOpts, ctrl, lev, ...)
 {
   `%op%` <- getOper(ctrl$allowParallel && getDoParWorkers() > 1)
   result <- foreach(
-    iter = seq(along = resampleIndex),
+    iter = seq(along.with = resampleIndex),
     .combine = "c",
     .verbose = FALSE,
     .errorhandling = "stop",
@@ -662,7 +662,7 @@ nominalRfeWorkflow <- function(x, y, sizes, ppOpts, ctrl, lev, ...)
   }
   `%op%` <- getOper(ctrl$allowParallel && getDoParWorkers() > 1)

-  result <- foreach(iter = seq(along = resampleIndex), .combine = "c", .verbose = FALSE, .errorhandling = "stop", .packages = "caret") %op%
+  result <- foreach(iter = seq(along.with = resampleIndex), .combine = "c", .verbose = FALSE, .errorhandling = "stop", .packages = "caret") %op%
   {
     loadNamespace("caret")
     requireNamespace("plyr")
@@ -694,7 +694,7 @@ nominalRfeWorkflow <- function(x, y, sizes, ppOpts, ctrl, lev, ...)
         ## If the user did not have nrow(x) in 'sizes', rfeIter added it.
         ## So, we need to find out how many set of predictions there are:
         nReps <- length(table(rfeResults$pred$Variables))
-        rfeResults$pred$rowIndex <- rep(seq(along = y)[unique(holdoutIndex)], nReps)
+        rfeResults$pred$rowIndex <- rep(seq(along.with = y)[unique(holdoutIndex)], nReps)
       }
       if(is.factor(y) && length(lev) <= 50)
       {
@@ -730,7 +730,7 @@ nominalRfeWorkflow <- function(x, y, sizes, ppOpts, ctrl, lev, ...)
   if(ctrl$method %in% c("boot632"))
   {
     externPerf <- merge(externPerf, apparent)
-    for(p in seq(along = perfNames))
+    for(p in seq(along.with = perfNames))
     {
       const <- 1-exp(-1)
       externPerf[, perfNames[p]] <- (const * externPerf[, perfNames[p]]) + ((1-const) * externPerf[, paste(perfNames[p],"Apparent", sep = "")])
@@ -749,7 +749,7 @@ looRfeWorkflow <- function(x, y, sizes, ppOpts, ctrl, lev, ...)
   resampleIndex <- ctrl$index
   `%op%` <- getOper(ctrl$allowParallel && getDoParWorkers() > 1)

-  result <- foreach(iter = seq(along = resampleIndex), .combine = "c", .verbose = FALSE, .errorhandling = "stop", .packages = "caret") %op%
+  result <- foreach(iter = seq(along.with = resampleIndex), .combine = "c", .verbose = FALSE, .errorhandling = "stop", .packages = "caret") %op%
   {
     loadNamespace("caret")
     requireNamespaceQuietStop("methods")
diff --git a/pkg/caret/man/maxDissim.Rd b/pkg/caret/man/maxDissim.Rd
index a83f98a2..76fb5848 100644
--- a/pkg/caret/man/maxDissim.Rd
+++ b/pkg/caret/man/maxDissim.Rd
@@ -89,7 +89,7 @@ example <- function(pct = 1, obj = minDiss, ...)
       xlab = "variable 1", ylab = "variable 2")
  points(base, pch = 16, cex = .7)

-  for(i in seq(along = newSamp))
+  for(i in seq(along.with = newSamp))
    points(
           pool[newSamp[i],1],
           pool[newSamp[i],2],
diff --git a/pkg/caret/man/plsda.Rd b/pkg/caret/man/plsda.Rd
index 18d40e5e..4ad1e72a 100644
--- a/pkg/caret/man/plsda.Rd
+++ b/pkg/caret/man/plsda.Rd
@@ -83,7 +83,7 @@ for the posterior probability calculations.
 \dontrun{
 data(mdrr)
 set.seed(1)
-inTrain <- sample(seq(along = mdrrClass), 450)
+inTrain <- sample(seq(along.with = mdrrClass), 450)

 nzv <- nearZeroVar(mdrrDescr)
 filteredDescr <- mdrrDescr[, -nzv]
diff --git a/pkg/caret/man/sensitivity.Rd b/pkg/caret/man/sensitivity.Rd
index 9a062e2f..8c9a8265 100644
--- a/pkg/caret/man/sensitivity.Rd
+++ b/pkg/caret/man/sensitivity.Rd
@@ -159,7 +159,7 @@ negPredValue(pred, truth, prevalence = 0.25)

 prev <- seq(0.001, .99, length = 20)
 npvVals <- ppvVals <- prev * NA
-for(i in seq(along = prev))
+for(i in seq(along.with = prev))
   {
     ppvVals[i] <- posPredValue(pred, truth, prevalence = prev[i])
     npvVals[i] <- negPredValue(pred, truth, prevalence = prev[i])
diff --git a/pkg/caret/tests/testthat/test_resamples.R b/pkg/caret/tests/testthat/test_resamples.R
index d97d516e..1d3629ec 100644
--- a/pkg/caret/tests/testthat/test_resamples.R
+++ b/pkg/caret/tests/testthat/test_resamples.R
@@ -41,7 +41,7 @@ test_that('resample calculations', {

 test_that('test group-k-fold', {
   get_data <- function(n = 500) {
-    prevalence <- seq(.1, .9, length = 26)
+    prevalence <- seq(.1, .9, length.out = 26)
     dat <- sample(letters, size = n, replace = TRUE, prob = sample(prevalence))
     data.frame(grp = dat, stringsAsFactors = TRUE)
   }
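
Reviewer note, not part of the patch: every hunk above is the same mechanical rename, spelling out seq()'s `along.with` and `length.out` arguments instead of relying on partial argument matching. A minimal, standalone sketch of the warning being silenced (plain base R, assuming only that `warnPartialMatchArgs` is switched on):

## Not caret code -- a base-R illustration of why the renames matter.
options(warnPartialMatchArgs = TRUE)

x <- c(10, 20, 30)

## 'along' and 'length' are not formal argument names of seq.default();
## they partially match 'along.with' and 'length.out', which now triggers
## "partial argument match" warnings:
seq(along = x)
seq(1, 9, length = 3)

## The spelled-out forms used throughout this diff return identical values
## without the warning:
identical(suppressWarnings(seq(along = x)), seq(along.with = x))               # TRUE
identical(suppressWarnings(seq(1, 9, length = 3)), seq(1, 9, length.out = 3))  # TRUE

## seq_along()/seq_len() are the usual idiomatic equivalents when no other
## seq() arguments are involved:
identical(seq(along.with = x), seq_along(x))                                   # TRUE

The one change that is not a rename, in train.formula, follows the same reasoning: the matched call stores its first argument under the name `form`, which would partially match model.frame()'s `formula` argument once the call is rewritten to `stats::model.frame`, hence the added line and its `# avoid warning under warnPartialMatchArgs=TRUE` comment.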