Commit 77649e2b authored by Armin Rauschenberger's avatar Armin Rauschenberger
Browse files

automation

parent 3ae79a7b
^Readme\.Rmd$
^\.travis\.yml$
^_pkgdown\.yml$
^docs$
^cran-comments\.md$
^appveyor\.yml$
This diff is collapsed.
# Generated by roxygen2: do not edit by hand # Generated by roxygen2: do not edit by hand
export()
export(bilasso) export(bilasso)
export(colasso) export(colasso)
export(colasso_compare) export(colasso_compare)
......
...@@ -36,10 +36,14 @@ ...@@ -36,10 +36,14 @@
#' loss function for logistic regression #' loss function for logistic regression
#' (the deviance is used for linear regression) #' (the deviance is used for linear regression)
#' #'
#' @param res
#' resolution
#'
#' @examples #' @examples
#' NA #' NA
#' #'
bilasso <- function(y,cutoff,X,alpha=1,nfolds=10,type.measure="deviance"){ bilasso <- function(y,cutoff,X,alpha=1,nfolds=10,type.measure="deviance",res=100){
# think about scaling y (for transformation of pred. val. to pred. prob.)
# checks # checks
.check(x=y,type="vector") .check(x=y,type="vector")
...@@ -48,6 +52,7 @@ bilasso <- function(y,cutoff,X,alpha=1,nfolds=10,type.measure="deviance"){ ...@@ -48,6 +52,7 @@ bilasso <- function(y,cutoff,X,alpha=1,nfolds=10,type.measure="deviance"){
.check(x=alpha,type="scalar",min=0,max=1) .check(x=alpha,type="scalar",min=0,max=1)
.check(x=nfolds,type="scalar",min=3) .check(x=nfolds,type="scalar",min=3)
.check(x=type.measure,type="string",values=c("deviance","class","mse","mae","auc")) .check(x=type.measure,type="string",values=c("deviance","class","mse","mae","auc"))
.check(x=res,type="scalar",min=10)
if(length(y)!=nrow(X)){stop("Contradictory sample size.")} if(length(y)!=nrow(X)){stop("Contradictory sample size.")}
# binarisation # binarisation
...@@ -62,13 +67,20 @@ bilasso <- function(y,cutoff,X,alpha=1,nfolds=10,type.measure="deviance"){ ...@@ -62,13 +67,20 @@ bilasso <- function(y,cutoff,X,alpha=1,nfolds=10,type.measure="deviance"){
fit$binomial <- glmnet::glmnet(y=z,x=X,family="binomial",alpha=alpha) fit$binomial <- glmnet::glmnet(y=z,x=X,family="binomial",alpha=alpha)
# weights # weights
fit$pi <- seq(from=0,to=1,length.out=101) # INCREASE length.out to 101 and 100, respectively !!!!!!!!!!!!!!!!!!!!!!!!!!!
fit$pi <- seq(from=0,to=1,length.out=res+1)
#fit$base <- exp(seq(from=log(1),to=log(100),length.out=100)) # old base
fit$base <- exp(seq(from=log(0.05*sd(y)),to=log(2*sd(y)),length.out=res)) # new base
#fit$grid <- expand.grid(pi=fit$pi,base=fit$base) # temporary
# cross-validation # cross-validation
pred <- list() pred <- list()
pred$y <- matrix(data=NA,nrow=length(y),ncol=length(fit$gaussian$lambda)) pred$y <- matrix(data=NA,nrow=length(y),ncol=length(fit$gaussian$lambda))
pred$z <- matrix(data=NA,nrow=length(y),ncol=length(fit$binomial$lambda)) pred$z <- matrix(data=NA,nrow=length(y),ncol=length(fit$binomial$lambda))
pred$pi <- matrix(data=NA,nrow=length(y),ncol=length(fit$pi)) pred$pi <- matrix(data=NA,nrow=length(y),ncol=length(fit$pi))
pred$base <- matrix(data=NA,nrow=length(y),ncol=length(fit$base))
#pred$grid <- matrix(data=NA,nrow=length(y),ncol=nrow(fit$grid))
for(k in unique(foldid)){ for(k in unique(foldid)){
y0 <- y[foldid!=k] y0 <- y[foldid!=k]
...@@ -92,11 +104,26 @@ bilasso <- function(y,cutoff,X,alpha=1,nfolds=10,type.measure="deviance"){ ...@@ -92,11 +104,26 @@ bilasso <- function(y,cutoff,X,alpha=1,nfolds=10,type.measure="deviance"){
pred$z[foldid==k,seq_len(ncol(temp))] <- temp pred$z[foldid==k,seq_len(ncol(temp))] <- temp
z_hat <- temp[,which.min(cvm)] z_hat <- temp[,which.min(cvm)]
# fusion # fusion (pi)
for(i in seq_along(fit$pi)){ for(i in seq_along(fit$pi)){
pred$pi[foldid==k,i] <- fit$pi[i]*(y_hat > cutoff) + (1-fit$pi[i])*z_hat #pred$pi[foldid==k,i] <- fit$pi[i]*(y_hat > cutoff) + (1-fit$pi[i])*z_hat # original
cont <- stats::pnorm(q=y_hat,mean=cutoff,sd=sd(y)) # trial
pred$pi[foldid==k,i] <- fit$pi[i]*cont + (1-fit$pi[i])*z_hat #trial
}
# fusion (base)
for(i in seq_along(fit$base)){
#pred$base[foldid==k,i] <- 1/(1+fit$base[i]^(cutoff-y_hat)) # old trial
pred$base[foldid==k,i] <- stats::pnorm(q=y_hat,mean=cutoff,sd=fit$base[i]) # new trial
} }
# fusion (pi and base)
#for(i in seq_len(nrow(fit$grid))){
# cont <- stats::pnorm(q=y_hat,mean=cutoff,sd=fit$grid$base[i])
# temp <- fit$grid$pi[i]*cont + (1-fit$grid$pi[i])*z_hat
# pred$grid[foldid==k,i] <- temp
#}
} }
# deviance (not comparable between Gaussian and binomial families) # deviance (not comparable between Gaussian and binomial families)
...@@ -104,8 +131,12 @@ bilasso <- function(y,cutoff,X,alpha=1,nfolds=10,type.measure="deviance"){ ...@@ -104,8 +131,12 @@ bilasso <- function(y,cutoff,X,alpha=1,nfolds=10,type.measure="deviance"){
fit$gaussian$lambda.min <- fit$gaussian$lambda[which.min(fit$gaussian$cvm)] fit$gaussian$lambda.min <- fit$gaussian$lambda[which.min(fit$gaussian$cvm)]
fit$binomial$cvm <- .loss(y=z,fit=pred$z,family="binomial",type.measure=type.measure)[[1]] fit$binomial$cvm <- .loss(y=z,fit=pred$z,family="binomial",type.measure=type.measure)[[1]]
fit$binomial$lambda.min <- fit$binomial$lambda[which.min(fit$binomial$cvm)] fit$binomial$lambda.min <- fit$binomial$lambda[which.min(fit$binomial$cvm)]
fit$cvm <- .loss(y=z,fit=pred$pi,family="binomial",type.measure=type.measure)[[1]] fit$pi.cvm <- .loss(y=z,fit=pred$pi,family="binomial",type.measure=type.measure)[[1]]
fit$pi.min <- fit$pi[which.min(fit$cvm)] fit$pi.min <- fit$pi[which.min(fit$pi.cvm)]
fit$base.cvm <- .loss(y=z,fit=pred$base,family="binomial",type.measure=type.measure)[[1]]
fit$base.min <- fit$base[which.min(fit$base.cvm)]
#fit$grid.cvm <- .loss(y=z,fit=pred$grid,family="binomial",type.measure=type.measure)[[1]]
#fit$grid.min <- fit$grid[which.min(fit$grid.cvm),]
fit$cutoff <- cutoff fit$cutoff <- cutoff
class(fit) <- "bilasso" class(fit) <- "bilasso"
...@@ -124,43 +155,74 @@ coef.bilasso <- function(x){ ...@@ -124,43 +155,74 @@ coef.bilasso <- function(x){
return(coef) return(coef)
} }
# It would even be better to replace max(abs(gaussian-cutoff)) by a fixed value.
predict.bilasso <- function(x,newx,type="response",...){ predict.bilasso <- function(x,newx,type="response",...){
# gaussian
# predicted values - gaussian
s <- x$gaussian$lambda.min s <- x$gaussian$lambda.min
gaussian <- as.numeric(stats::predict(object=x$gaussian,newx=newx,s=s,type=type,...)) pred_y <- as.numeric(stats::predict(object=x$gaussian,newx=newx,s=s,type=type,...))
gaussian <- (gaussian/max(abs(gaussian))+1)/2 # trial
# It would even be better to replace max(abs(gaussian)) by a fixed value, # predicted values - binomial
# but make sure that min>=0 and max<=1. s <- x$binomial$lambda.min
#gaussian <- 1*(gaussian > x$cutoff) # original pred_z <- as.numeric(stats::predict(object=x$binomial,newx=newx,s=s,type=type,...))
# gaussian
old <- 1*(pred_y > x$cutoff) # original
gaussian <- ((pred_y-x$cutoff)/max(abs(pred_y-x$cutoff))+1)/2 # trial
if(any((pred_y>=x$cutoff)!=(gaussian>=0.5))){
stop("Wrong check sum.",call.=FALSE)
}
if(any(round(gaussian)!=old)){
stop("Not compatible.",call.=FALSE)
}
if(any(gaussian<0|gaussian>1)){ if(any(gaussian<0|gaussian>1)){
stop("unit interval",call.=FALSE) stop("unit interval",call.=FALSE)
} }
# binomial # binomial
s <- x$binomial$lambda.min binomial <- pred_z
binomial <- as.numeric(stats::predict(object=x$binomial,newx=newx,s=s,type=type,...))
# mixed # pi-model
mixed <- x$pi.min*gaussian + (1-x$pi.min)*binomial pi <- x$pi.min*gaussian + (1-x$pi.min)*binomial
if(any((gaussian <= mixed) != (mixed < binomial))){ # check this if(any((gaussian <= pi) != (pi < binomial))){ # check this
stop("consistency",call.=FALSE) #browser()
warning("consistency",call.=FALSE) # check why this happens
} }
frame <- data.frame(gaussian=gaussian,binomial=binomial,mixed=mixed) # base-model
#base <- 1/(1+x$base.min^(x$cutoff-pred_y)) # old trial
base <- stats::pnorm(q=pred_y,mean=x$cutoff,sd=x$base.min) # new trial
# # grid
#cont <- stats::pnorm(q=pred_y,mean=x$cutoff,sd=x$grid.min$base)
#grid <- x$grid.min$pi*cont + (1-x$grid.min$pi)*pred_z
# # trial
cont <- stats::pnorm(q=pred_y,mean=x$cutoff,sd=x$base.min)
trial <- x$pi.min*cont + (1-x$pi.min)*pred_z
frame <- data.frame(gaussian=gaussian,binomial=binomial,pi=pi,base=base,trial=trial)
return(frame) return(frame)
} }
bilasso_compare <- function(y,cutoff,X,type.measure="deviance"){
bilasso_compare <- function(y,cutoff,X,type.measure="deviance",res=100){
z <- 1*(y > cutoff) z <- 1*(y > cutoff)
fold <- palasso:::.folds(y=z,nfolds=5) fold <- palasso:::.folds(y=z,nfolds=5)
pred <- matrix(data=NA,nrow=length(y),ncol=3, cols <- c("gaussian","binomial","pi","base","trial")
dimnames=list(NULL,c("gaussian","binomial","mixed"))) pred <- matrix(data=NA,nrow=length(y),ncol=length(cols),
dimnames=list(NULL,cols))
select <- list() select <- list()
for(i in sort(unique(fold))){ for(i in sort(unique(fold))){
fit <- bilasso(y=y[fold!=i],cutoff=cutoff,X=X[fold!=i,],type.measure=type.measure) fit <- bilasso(y=y[fold!=i],cutoff=cutoff,X=X[fold!=i,],type.measure=type.measure,res=res)
#gaussian <- 1*(stats::predict(object=fit$gaussian, #gaussian <- 1*(stats::predict(object=fit$gaussian,
# newx=X[fold==i,], # newx=X[fold==i,],
...@@ -175,19 +237,30 @@ bilasso_compare <- function(y,cutoff,X,type.measure="deviance"){ ...@@ -175,19 +237,30 @@ bilasso_compare <- function(y,cutoff,X,type.measure="deviance"){
# pred[fold==i,"binomial"] <- binomial # pred[fold==i,"binomial"] <- binomial
# pred[fold==i,"mixed"] <- fit$pi.min*pred[fold==i,"gaussian"] + (1-fit$pi.min)*pred[fold==i,"binomial"] # pred[fold==i,"mixed"] <- fit$pi.min*pred[fold==i,"gaussian"] + (1-fit$pi.min)*pred[fold==i,"binomial"]
temp <- predict.bilasso(fit,newx=X[fold==i,]) temp <- colasso:::predict.bilasso(fit,newx=X[fold==i,])
pred[fold==i,"gaussian"] <- temp$gaussian model <- colnames(pred)
pred[fold==i,"binomial"] <- temp$binomial for(j in seq_along(model)){
pred[fold==i,"mixed"] <- temp$mixed pred[fold==i,model[j]] <- temp[[model[j]]]
}
#pred[fold==i,"gaussian"] <- temp$gaussian
#pred[fold==i,"binomial"] <- temp$binomial
#pred[fold==i,"mixed"] <- temp$mixed
#pred[fold==i,"extra"] <- temp$extra
#pred[fold==i,"grid"] <- temp$grid
} }
loss <- list() type <- c("deviance","class","mse","mae","auc")
loss$deviance <- .loss(y=z,fit=pred,family="binomial",type.measure="deviance")[[1]] loss <- lapply(X=type,FUN=function(x) .loss(y=z,fit=pred,family="binomial",type.measure=x,foldid=fold)[[1]])
loss$class <- .loss(y=z,fit=pred,family="binomial",type.measure="class")[[1]] names(loss) <- type
loss$mse <- .loss(y=z,fit=pred,family="binomial",type.measure="mse")[[1]]
loss$mae <- .loss(y=z,fit=pred,family="binomial",type.measure="mae")[[1]] #loss <- list()
loss$auc <- .loss(y=z,fit=pred,family="binomial",type.measure="auc",foldid=fold)[[1]] #loss$deviance <- .loss(y=z,fit=pred,family="binomial",type.measure="deviance")[[1]]
#loss$class <- .loss(y=z,fit=pred,family="binomial",type.measure="class")[[1]]
#loss$mse <- .loss(y=z,fit=pred,family="binomial",type.measure="mse")[[1]]
#loss$mae <- .loss(y=z,fit=pred,family="binomial",type.measure="mae")[[1]]
#loss$auc <- .loss(y=z,fit=pred,family="binomial",type.measure="auc",foldid=fold)[[1]]
return(loss) return(loss)
} }
......
if(FALSE){
#' @export
#' @title
#' Logistic regression with a continuous response
#'
#' @description
#' Implements penalised logistic regression
#' with both a binary and a continuous response.
#'
#' @details
#' Finds a compromise between binomial (\eqn{pi=0})
#' and linear (\eqn{pi=1}) regression.
#'
#' @param y
#' continuous response\strong{:}
#' vector of length \eqn{n}
#'
#' @param cutoff
#' value between \code{min(y)} and \code{max(y)}
#'
#' @param X
#' covariates\strong{:}
#' matrix with \eqn{n} rows (samples)
#' and \eqn{p} columns (variables)
#'
#' @param alpha
#' elastic net parameter\strong{:}
#' numeric between \eqn{1} and \eqn{0};
#' compromise between lasso (\eqn{alpha=1})
#' and ridge (\eqn{alpha=0}) regression
#'
#' @param nfolds
#' number of folds
#'
#' @param type.measure
#' loss function for logistic regression
#' (the deviance is used for linear regression)
#'
#' @param res
#' resolution of the tuning grids\strong{:}
#' positive integer;
#' \code{res+1} candidate mixing weights (\code{pi})
#' and \code{res} candidate spreads (\code{base})
#'
#' @return
#' Object of class \code{bilasso}\strong{:}
#' list with the fitted \code{glmnet} models and the
#' cross-validated tuning parameters
#' (\code{pi.min}, \code{base.min}, \code{grid.min}).
#'
#' @examples
#' NA
#'
bilasso <- function(y,cutoff,X,alpha=1,nfolds=10,type.measure="deviance",res=10){
  # checks
  .check(x=y,type="vector")
  .check(x=cutoff,type="scalar",min=min(y),max=max(y))
  .check(x=X,type="matrix")
  .check(x=alpha,type="scalar",min=0,max=1)
  .check(x=nfolds,type="scalar",min=3)
  .check(x=type.measure,type="string",values=c("deviance","class","mse","mae","auc"))
  .check(x=res,type="scalar",min=1)
  if(length(y)!=nrow(X)){stop("Contradictory sample size.")}
  # binarisation of the continuous response
  z <- 1*(y > cutoff)
  # fold identifiers (stratified by the binarised response)
  foldid <- palasso:::.folds(y=z,nfolds=nfolds)
  # model fitting on the full data
  fit <- list()
  fit$gaussian <- glmnet::glmnet(y=y,x=X,family="gaussian",alpha=alpha)
  fit$binomial <- glmnet::glmnet(y=z,x=X,family="binomial",alpha=alpha)
  # tuning grids: mixing weights (pi in [0,1]) and spreads (base,
  # log-spaced between 5% and 200% of the response standard deviation)
  sd_y <- stats::sd(y)
  fit$pi <- seq(from=0,to=1,length.out=res+1)
  fit$base <- exp(seq(from=log(0.05*sd_y),to=log(2*sd_y),length.out=res))
  fit$grid <- expand.grid(pi=fit$pi,base=fit$base)
  # containers for cross-validated (out-of-fold) predictions
  pred <- list()
  pred$y <- matrix(data=NA,nrow=length(y),ncol=length(fit$gaussian$lambda))
  pred$z <- matrix(data=NA,nrow=length(y),ncol=length(fit$binomial$lambda))
  pred$pi <- matrix(data=NA,nrow=length(y),ncol=length(fit$pi))
  pred$base <- matrix(data=NA,nrow=length(y),ncol=length(fit$base))
  pred$grid <- matrix(data=NA,nrow=length(y),ncol=nrow(fit$grid))
  for(k in unique(foldid)){
    y0 <- y[foldid!=k]
    y1 <- y[foldid==k]
    z0 <- z[foldid!=k]
    z1 <- z[foldid==k]
    X0 <- X[foldid!=k,,drop=FALSE]
    X1 <- X[foldid==k,,drop=FALSE]
    # linear regression: keep the prediction at the loss-minimising lambda
    net <- glmnet::glmnet(y=y0,x=X0,family="gaussian",alpha=alpha)
    temp <- stats::predict(object=net,newx=X1,type="response",s=fit$gaussian$lambda)
    cvm <- .loss(y=y1,fit=temp,family="gaussian",type.measure="deviance")[[1]]
    pred$y[foldid==k,seq_len(ncol(temp))] <- temp
    y_hat <- temp[,which.min(cvm)]
    # logistic regression: keep the prediction at the loss-minimising lambda
    net <- glmnet::glmnet(y=z0,x=X0,family="binomial",alpha=alpha)
    temp <- stats::predict(object=net,newx=X1,type="response",s=fit$binomial$lambda)
    cvm <- .loss(y=z1,fit=temp,family="binomial",type.measure=type.measure)[[1]]
    pred$z[foldid==k,seq_len(ncol(temp))] <- temp
    z_hat <- temp[,which.min(cvm)]
    # fusion (pi): mix the hard linear classification with z_hat
    for(i in seq_along(fit$pi)){
      pred$pi[foldid==k,i] <- fit$pi[i]*(y_hat > cutoff) + (1-fit$pi[i])*z_hat
    }
    # fusion (base): map y_hat to a probability via a Gaussian cdf around the cutoff
    for(i in seq_along(fit$base)){
      pred$base[foldid==k,i] <- stats::pnorm(q=y_hat,mean=cutoff,sd=fit$base[i])
    }
    # fusion (pi and base): joint grid over both tuning parameters
    for(i in seq_len(nrow(fit$grid))){
      cont <- stats::pnorm(q=y_hat,mean=cutoff,sd=fit$grid$base[i])
      pred$grid[foldid==k,i] <- fit$grid$pi[i]*cont + (1-fit$grid$pi[i])*z_hat
    }
  }
  # select tuning parameters by cross-validated loss
  # (deviance is not comparable between Gaussian and binomial families)
  fit$gaussian$cvm <- .loss(y=y,fit=pred$y,family="gaussian",type.measure="deviance")[[1]]
  fit$gaussian$lambda.min <- fit$gaussian$lambda[which.min(fit$gaussian$cvm)]
  fit$binomial$cvm <- .loss(y=z,fit=pred$z,family="binomial",type.measure=type.measure)[[1]]
  fit$binomial$lambda.min <- fit$binomial$lambda[which.min(fit$binomial$cvm)]
  fit$pi.cvm <- .loss(y=z,fit=pred$pi,family="binomial",type.measure=type.measure)[[1]]
  fit$pi.min <- fit$pi[which.min(fit$pi.cvm)]
  fit$base.cvm <- .loss(y=z,fit=pred$base,family="binomial",type.measure=type.measure)[[1]]
  fit$base.min <- fit$base[which.min(fit$base.cvm)]
  fit$grid.cvm <- .loss(y=z,fit=pred$grid,family="binomial",type.measure=type.measure)[[1]]
  fit$grid.min <- fit$grid[which.min(fit$grid.cvm),]
  fit$cutoff <- cutoff
  class(fit) <- "bilasso"
  return(fit)
}
coef.bilasso <- function(x){
  # Extract the penalised regression coefficients at the selected lambdas:
  # one column from the linear model ("beta"), one from the logistic ("gamma").
  beta <- glmnet::coef.glmnet(object=x$gaussian,s=x$gaussian$lambda.min)
  gamma <- glmnet::coef.glmnet(object=x$binomial,s=x$binomial$lambda.min)
  out <- cbind(beta,gamma)
  colnames(out) <- c("beta","gamma")
  return(out)
}
# It would even be better to replace max(abs(gaussian-cutoff)) by a fixed value.
# Predict class probabilities for new data from a fitted "bilasso" object.
#
# Args:
#   x:    object of class "bilasso" (list with $gaussian, $binomial glmnet fits
#         and the tuning parameters $pi.min, $base.min, $grid.min, $cutoff)
#   newx: covariate matrix with the same columns as the training matrix
#   type: prediction type forwarded to stats::predict (default "response")
#   ...:  further arguments forwarded to stats::predict
#
# Returns: data.frame with one probability column per candidate model
#          (gaussian, binomial, pi, base, grid, trial).
predict.bilasso <- function(x,newx,type="response",...){
# predicted values - gaussian
s <- x$gaussian$lambda.min
pred_y <- as.numeric(stats::predict(object=x$gaussian,newx=newx,s=s,type=type,...))
# predicted values - binomial
s <- x$binomial$lambda.min
pred_z <- as.numeric(stats::predict(object=x$binomial,newx=newx,s=s,type=type,...))
# gaussian: rescale the linear predictions into [0,1] with 0.5 at the cutoff
# NOTE(review): max(abs(pred_y-x$cutoff)) is sample-dependent (see comment
# above this function) and is zero if all predictions equal the cutoff —
# that would produce NaN; consider a fixed scale.
old <- 1*(pred_y > x$cutoff) # original
gaussian <- ((pred_y-x$cutoff)/max(abs(pred_y-x$cutoff))+1)/2 # trial
# sanity check: rescaling must keep observations on the same side of 0.5
if(any((pred_y>=x$cutoff)!=(gaussian>=0.5))){
stop("Wrong check sum.",call.=FALSE)
}
# sanity check: rounding the rescaled values must reproduce the hard labels
if(any(round(gaussian)!=old)){
stop("Not compatible.",call.=FALSE)
}
# sanity check: rescaled values must lie in the unit interval
if(any(gaussian<0|gaussian>1)){
stop("unit interval",call.=FALSE)
}
# binomial: logistic probabilities used as-is
binomial <- pred_z
# pi-model: convex combination of the two probability vectors
pi <- x$pi.min*gaussian + (1-x$pi.min)*binomial
# the mixture should lie between its two components; warn (not stop)
# because this check is known to fire — cause still under investigation
if(any((gaussian <= pi) != (pi < binomial))){ # check this
#browser()
warning("consistency",call.=FALSE) # check why this happens
}
# base-model: Gaussian cdf of the linear predictions around the cutoff
#base <- 1/(1+x$base.min^(x$cutoff-pred_y)) # old trial
base <- stats::pnorm(q=pred_y,mean=x$cutoff,sd=x$base.min) # new trial
# grid: jointly tuned pi and base from the cross-validated grid search
cont <- stats::pnorm(q=pred_y,mean=x$cutoff,sd=x$grid.min$base)
grid <- x$grid.min$pi*cont + (1-x$grid.min$pi)*pred_z
# trial: marginally tuned pi combined with marginally tuned base
cont <- stats::pnorm(q=pred_y,mean=x$cutoff,sd=x$base.min)
trial <- x$pi.min*cont + (1-x$pi.min)*pred_z
frame <- data.frame(gaussian=gaussian,binomial=binomial,pi=pi,base=base,grid=grid,trial=trial)
return(frame)
}
bilasso_compare <- function(y,cutoff,X,type.measure="deviance"){
  # Compare the candidate models by external 5-fold cross-validation:
  # refit bilasso on each training split, predict on the held-out split,
  # and score the out-of-fold probabilities with several loss functions.
  z <- 1*(y > cutoff)
  fold <- palasso:::.folds(y=z,nfolds=5)
  models <- c("gaussian","binomial","pi","base","grid","trial")
  pred <- matrix(data=NA,nrow=length(y),ncol=length(models),
                 dimnames=list(NULL,models))
  select <- list()
  for(k in sort(unique(fold))){
    # fit on the training folds, predict on the held-out fold
    fit <- bilasso(y=y[fold!=k],cutoff=cutoff,X=X[fold!=k,],type.measure=type.measure)
    temp <- colasso:::predict.bilasso(fit,newx=X[fold==k,])
    # copy each model's probabilities into the matching column
    for(name in colnames(pred)){
      pred[fold==k,name] <- temp[[name]]
    }
  }
  # evaluate every loss function on the out-of-fold predictions
  type <- c("deviance","class","mse","mae","auc")
  loss <- lapply(X=type,FUN=function(x) .loss(y=z,fit=pred,family="binomial",type.measure=x,foldid=fold)[[1]])
  names(loss) <- type
  return(loss)
}
.check <- function(x,type,miss=FALSE,min=NULL,max=NULL,values=NULL){
name <- deparse(substitute(x))
if(type=="string"){
cond <- is.vector(x) & is.character(x) & length(x)==1
} else if(type=="scalar"){
cond <- is.vector(x) & is.numeric(x) & length(x)==1
} else if(type=="vector"){
cond <- is.vector(x) & is.numeric(x)
} else if(type=="matrix"){
cond <- is.matrix(x) & is.numeric(x)
} else {
warning("Unknown type.")
}
if(!cond){
stop(paste0("Argument \"",name,"\" does not match formal requirements."),call.=FALSE)
}
if(!miss && any(is.na(x))){
stop(paste0("Argument \"",name,"\" contains missing values."),call.=FALSE)
}
if(!is.null(min) && any(x<min)){
stop(paste0("expecting ",name," >= ",min),call.=FALSE)
}
if(!is.null(max) && any(x>max)){
stop(paste0("expecting ",name," <= ",max),call.=FALSE)
}
if(!is.null(values) && !(x %in% values)){
stop(paste0("Argument \"",name,"\" contains invalid values."),call.=FALSE)