
Commit

fixed compute_interval and some formatting. The docstrings have yet to be updated from the old file.
pasq-cat committed Aug 13, 2024
1 parent a1808ef commit 0914e03
Showing 4 changed files with 118 additions and 121 deletions.
111 changes: 58 additions & 53 deletions src/conformal_models/inductive_bayes_classification.jl
@@ -1,68 +1,73 @@
# Simple
"The `BayesClassifier` is the simplest approach to Inductive Conformalized Bayes."
mutable struct BayesClassifier{Model<:Supervised} <: ConformalProbabilisticSet
    model::Model
    coverage::AbstractFloat
    scores::Union{Nothing,AbstractArray}
    heuristic::Function
    train_ratio::AbstractFloat
end

function BayesClassifier(
    model::Supervised;
    coverage::AbstractFloat=0.95,
    heuristic::Function=f(y, ŷ) = -ŷ,
    train_ratio::AbstractFloat=0.5,
)
    return BayesClassifier(model, coverage, nothing, heuristic, train_ratio)
end

@doc raw"""
    MMI.fit(conf_model::BayesClassifier, verbosity, X, y)

For the [`BayesClassifier`](@ref) nonconformity scores are computed as follows:

``
S_i^{\text{CAL}} = s(X_i, Y_i) = h(\hat\mu(X_i), Y_i), \ i \in \mathcal{D}_{\text{calibration}}
``

A typical choice for the heuristic function is ``h(\hat\mu(X_i), Y_i)=1-\hat\mu(X_i)_{Y_i}``, where ``\hat\mu(X_i)_{Y_i}`` denotes the softmax output of the true class and ``\hat\mu`` denotes the model fitted on training data ``\mathcal{D}_{\text{train}}``. The simple approach only takes the softmax probability of the true label into account.
"""
function MMI.fit(conf_model::BayesClassifier, verbosity, X, y)

    # Data Splitting:
    Xtrain, ytrain, Xcal, ycal = split_data(conf_model, X, y)

    # Training:
    fitresult, cache, report = MMI.fit(conf_model.model, verbosity, Xtrain, ytrain)

    # Nonconformity Scores:
    ŷ = pdf.(MMI.predict(conf_model.model, fitresult, Xcal), ycal) # predict returns a vector of distributions
    conf_model.scores = @.(conf_model.heuristic(ycal, ŷ))

    return (fitresult, cache, report)
end
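A small numeric illustration of these scores (not part of the commit; the values are hypothetical and `heuristic` below just restates the constructor's default ``f(y, ŷ) = -ŷ``):

ycal = ["a", "b", "a"]              # hypothetical calibration labels
ŷ    = [0.9, 0.6, 0.3]              # softmax probability assigned to the true class of each point
heuristic(y, ŷ) = -ŷ                # same as the constructor's default
scores = @.(heuristic(ycal, ŷ))     # [-0.9, -0.6, -0.3]: the least confident point gets the largest score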

@doc raw"""
    MMI.predict(conf_model::BayesClassifier, fitresult, Xnew)

For the [`BayesClassifier`](@ref) prediction sets are computed as follows,

``
\hat{C}_{n,\alpha}(X_{n+1}) = \left\{y: s(X_{n+1},y) \le \hat{q}_{n, \alpha}^{+} \{S_i^{\text{CAL}}\} \right\}, \ i \in \mathcal{D}_{\text{calibration}}
``

where ``\mathcal{D}_{\text{calibration}}`` denotes the designated calibration data.
"""
function MMI.predict(conf_model::BayesClassifier, fitresult, Xnew)
    p̂ = MMI.predict(conf_model.model, fitresult, MMI.reformat(conf_model.model, Xnew)...)
    v = conf_model.scores
    q̂ = qplus(v, conf_model.coverage)
    p̂ = map(p̂) do pp
        L = p̂.decoder.classes
        probas = pdf.(pp, L)
        is_in_set = 1.0 .- probas .<= q̂
        if !all(is_in_set .== false)
            pp = UnivariateFinite(L[is_in_set], probas[is_in_set])
        else
            pp = missing
        end
        return pp
    end
    return p̂
end
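For orientation, a minimal usage sketch of the classifier wrapper above (not part of the commit; it assumes `BayesClassifier` is exported and composes with MLJ machines the way the package's other inductive classifiers do, and the atomic model and toy data are hypothetical stand-ins):

using MLJ
using ConformalPrediction: BayesClassifier

Tree = @load DecisionTreeClassifier pkg=DecisionTree verbosity=0
X, y = make_blobs(200)                               # toy classification data
conf_model = BayesClassifier(Tree(); coverage=0.9, train_ratio=0.5)
mach = machine(conf_model, X, y)
fit!(mach)                                           # splits into proper training and calibration sets internally
predict(mach, X)                                     # set-valued predictions: a UnivariateFinite over the retained labels, or missing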
125 changes: 60 additions & 65 deletions src/conformal_models/inductive_bayes_regression.jl
@@ -1,108 +1,103 @@
#using LaplaceRedux.LaplaceRegression
using LaplaceRedux: LaplaceRegression

"The `BayesRegressor` is the simplest approach to Inductive Conformalized Bayes."
mutable struct BayesRegressor{Model<:Supervised} <: ConformalInterval
    model::Model
    coverage::AbstractFloat
    scores::Union{Nothing,AbstractArray}
    heuristic::Function
    train_ratio::AbstractFloat
end

function ConformalBayes(y, fμ, fvar)
    # compute the standard deviation from the variance
    std = sqrt.(fvar)
    # compute the probability density
    coeff = 1 ./ (std .* sqrt(2 * π))
    exponent = -((y .- fμ) .^ 2) ./ (2 .* std .^ 2)
    return -coeff .* exp.(exponent)
end

function compute_interval(fμ, fvar, q̂)
    # compute the standard deviation from the variance
    std = sqrt.(fvar)
    # find the half range delta so that f(y|x) > -q̂, assuming the data are gaussian distributed
    delta = std .* sqrt.(-2 * log.(-q̂ .* std .* sqrt(2π)))
    # calculate the interval
    lower_bound = fμ .- delta
    upper_bound = fμ .+ delta

    data = hcat(lower_bound, upper_bound)

    return data
end
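A sketch of where the `delta` formula comes from (assuming the Gaussian score defined in `ConformalBayes` above, so that ``\hat{q}`` is a quantile of negative densities and therefore negative): a value ``y`` is included whenever its predictive density exceeds ``-\hat{q}``,

``
\frac{1}{\sigma\sqrt{2\pi}} \exp\left(-\frac{(y-\hat\mu)^2}{2\sigma^2}\right) \ge -\hat{q}
\quad\Longleftrightarrow\quad
|y-\hat\mu| \le \sigma\sqrt{-2\log\left(-\hat{q}\,\sigma\sqrt{2\pi}\right)},
``

which is exactly the half-width `delta` computed above. The square root is real only when ``-\hat{q} \le 1/(\sigma\sqrt{2\pi})``, i.e. when the threshold density is attainable at all; otherwise the prediction interval is empty.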


function BayesRegressor(
    model::Supervised;
    coverage::AbstractFloat=0.95,
    heuristic::Function=ConformalBayes,
    train_ratio::AbstractFloat=0.5,
)
    @assert typeof(model) == LaplaceRegression "Model must be of type Laplace"
    return BayesRegressor(model, coverage, nothing, heuristic, train_ratio)
end

@doc raw"""
    MMI.fit(conf_model::BayesRegressor, verbosity, X, y)

For the [`BayesRegressor`](@ref) nonconformity scores are computed as follows:

``
S_i^{\text{CAL}} = s(X_i, Y_i) = h(\hat\mu(X_i), Y_i), \ i \in \mathcal{D}_{\text{calibration}}
``

A typical choice for the heuristic function is ``h(\hat\mu(X_i), Y_i)=1-\hat\mu(X_i)_{Y_i}`` where ``\hat\mu(X_i)_{Y_i}`` denotes the softmax output of the true class and ``\hat\mu`` denotes the model fitted on training data ``\mathcal{D}_{\text{train}}``. The simple approach only takes the softmax probability of the true label into account.
"""
function MMI.fit(conf_model::BayesRegressor, verbosity, X, y)
    # Data Splitting:
    Xtrain, ytrain, Xcal, ycal = split_data(conf_model, X, y)

    # Training:
    fitresult, cache, report = MMI.fit(
        conf_model.model, verbosity, MMI.reformat(conf_model.model, Xtrain, ytrain)...
    )

    lap = fitresult[1]

    # Nonconformity Scores:
    #yhat = MMI.predict(conf_model.model, fitresult[2], Xcal)
    yhat = MMI.predict(fitresult[2], fitresult, Xcal)

    fμ = vcat([x[1] for x in yhat]...)
    fvar = vcat([x[2] for x in yhat]...)
    cache = ()
    report = ()

    conf_model.scores = @.(conf_model.heuristic(ycal, fμ, fvar))

    return (fitresult, cache, report)
end

@doc raw"""
    MMI.predict(conf_model::BayesRegressor, fitresult, Xnew)

For the [`BayesRegressor`](@ref) prediction sets are computed as follows,

``
\hat{C}_{n,\alpha}(X_{n+1}) = \left\{y: s(X_{n+1},y) \le \hat{q}_{n, \alpha}^{+} \{S_i^{\text{CAL}}\} \right\}, \ i \in \mathcal{D}_{\text{calibration}}
``

where ``\mathcal{D}_{\text{calibration}}`` denotes the designated calibration data.
"""
function MMI.predict(conf_model::BayesRegressor, fitresult, Xnew)
    chain = fitresult
    yhat = MMI.predict(conf_model.model, fitresult, Xnew)
    fμ = vcat([x[1] for x in yhat]...)
    fvar = vcat([x[2] for x in yhat]...)
    v = conf_model.scores
    q̂ = qplus(v, conf_model.coverage)
    data = compute_interval(fμ, fvar, q̂)

    return data
end
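To see how the pieces above fit together, here is a hand-rolled sketch of the calibration-then-prediction flow, using only functions from this file plus the package's `qplus` quantile helper called in `MMI.predict` above (the numbers are made up and no Laplace model is involved):

ycal = randn(50) .+ 1.0                    # dummy calibration targets
fμ   = fill(1.0, 50)                       # dummy predictive means on the calibration set
fvar = fill(0.25, 50)                      # dummy predictive variances
scores = ConformalBayes(ycal, fμ, fvar)    # nonconformity scores: negative Gaussian densities
q̂ = qplus(scores, 0.95)                    # conformal quantile of the calibration scores
compute_interval([1.5], [0.25], q̂)         # 1×2 matrix: lower and upper bound for one new point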
1 change: 0 additions & 1 deletion src/conformal_models/inductive_classification.jl
@@ -7,7 +7,6 @@ function score(conf_model::ConformalProbabilisticSet, fitresult, X, y=nothing)
    return score(conf_model, conf_model.model, fitresult, X, y)
end

# Simple
"The `SimpleInductiveClassifier` is the simplest approach to Inductive Conformal Classification. Contrary to the [`NaiveClassifier`](@ref) it computes nonconformity scores using a designated calibration dataset."
mutable struct SimpleInductiveClassifier{Model<:Supervised} <: ConformalProbabilisticSet
2 changes: 0 additions & 2 deletions src/conformal_models/utils.jl
@@ -164,7 +164,5 @@ end
Check if the model is a classification model or a regression model
"""
function is_classifier(model::Supervised)
    return target_scitype(model) <: AbstractVector{<:Finite}
end
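A quick illustration of this scitype check (not from the commit; the models are hypothetical and assume the corresponding MLJ interface packages are installed):

using MLJ
Tree = @load DecisionTreeClassifier pkg=DecisionTree verbosity=0
is_classifier(Tree())      # true:  target_scitype is AbstractVector{<:Finite}
Ridge = @load RidgeRegressor pkg=MLJLinearModels verbosity=0
is_classifier(Ridge())     # false: targets are Continuous, not Finite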

