diff --git a/docs/Project.toml b/docs/Project.toml index f8f8855..8ac4752 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -1,3 +1,11 @@ [deps] +BSON = "fbb218c0-5317-5bc6-957e-2ee96dd4b1f0" Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4" +Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c" +ImageCore = "a09fc81d-aa75-5fe9-8630-4744c3626534" +ImageIO = "82e4d734-157c-48bb-816b-45c225c6df19" +ImageShow = "4e3cecfd-b093-5904-9786-8bbb286a6a31" +MLDatasets = "eb30cadb-4394-5ae3-aed4-317e484a6458" +Metalhead = "dbeba491-748d-5e0e-a39e-b530a07fa0cc" XAIBase = "9b48221d-a747-4c1b-9860-46a1d8ba24a7" +Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" diff --git a/docs/make.jl b/docs/make.jl index 43b96f8..256d0ba 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -8,8 +8,9 @@ makedocs(; format = Documenter.HTML(; prettyurls=get(ENV, "CI", "false") == "true", assets=String[]), #! format: off pages = [ - "XAIBase Interface" => "index.md", - "API Reference" => "api.md" + "XAIBase Interface" => "index.md", + "Example Implementations" => "examples.md", + "API Reference" => "api.md" ], #! format: on linkcheck = true, diff --git a/docs/src/api.md b/docs/src/api.md index 71bd60f..d6e043b 100644 --- a/docs/src/api.md +++ b/docs/src/api.md @@ -1,4 +1,5 @@ -## Using analyzers +# API Reference +## Computing explanations Most methods in the Julia-XAI ecosystem work by calling `analyze` on an input and an analyzer: ```@docs analyze @@ -22,7 +23,7 @@ TopNFeatures ``` ## Internals -Neuron selectors: +### Output selection ```@docs XAIBase.MaxActivationSelector XAIBase.IndexSelector diff --git a/docs/src/examples.md b/docs/src/examples.md new file mode 100644 index 0000000..1975350 --- /dev/null +++ b/docs/src/examples.md @@ -0,0 +1,99 @@ +# [Example Implementations](@id examples) +The following examples demonstrate the implementation of XAI methods using the XAIBase.jl interface. 
+To evaluate our methods, we load a small, pre-trained LeNet5 model and the MNIST dataset: + +```@example implementations +using Flux +using BSON + +model = BSON.load("model.bson", @__MODULE__)[:model] # load pre-trained LeNet-5 model +``` + +```@example implementations +using MLDatasets +using ImageCore, ImageIO, ImageShow + +index = 10 +x, y = MNIST(Float32, :test)[index] + +# By convention in Flux.jl, the input needs to be resized to WHCN format +# by adding a color channel and batch dimensions. +input = reshape(x, 28, 28, 1, :); + +convert2image(MNIST, x) +``` + +## Example 1: Random explanation +To get started, we implement a nonsensical method +that returns a random explanation in the shape of the input. + +```@example implementations +using XAIBase + +struct RandomAnalyzer{M} <: AbstractXAIMethod + model::M +end + +function (method::RandomAnalyzer)(input, output_selector::AbstractNeuronSelector) + output = method.model(input) + output_selection = output_selector(output) + + val = rand(size(input)...) + return Explanation(val, output, output_selection, :RandomAnalyzer, :sensitivity, nothing) +end +``` + +We can directly use XAIBase's `analyze` and `heatmap` functions +to compute and visualize the random explanation: + +```@example implementations +analyzer = RandomAnalyzer(model) +heatmap(input, analyzer) +``` + +As expected, the explanation is just noise. + +## Example 2: Input sensitivity +In this second example, we naively reimplement the `Gradient` analyzer from +[ExplainableAI.jl](https://github.com/Julia-XAI/ExplainableAI.jl). 
+ +```@example implementations +using XAIBase +using Zygote: gradient + +struct MyGradient{M} <: AbstractXAIMethod + model::M +end + +function (method::MyGradient)(input, output_selector::AbstractNeuronSelector) + output = method.model(input) + output_selection = output_selector(output) + + grad = gradient((x) -> only(method.model(x)[output_selection]), input) + val = only(grad) + return Explanation(val, output, output_selection, :MyGradient, :sensitivity, nothing) +end +``` + +!!! note + [ExplainableAI.jl](https://github.com/Julia-XAI/ExplainableAI.jl) + implements the `Gradient` analyzer in a more efficient way + that works with batched inputs and only requires a single forward + and backward pass through the model. + +Once again, we can directly use XAIBase's `analyze` and `heatmap` functions +```@example implementations +analyzer = MyGradient(model) +expl = analyze(input, analyzer) +heatmap(expl) +``` + +```@example implementations +heatmap(expl, reduce=:norm, colorscheme=:twilight) +``` + +and make use of all the features provided by the Julia-XAI ecosystem. + +!!! note + For an introduction to the [Julia-XAI ecosystem](https://github.com/Julia-XAI), + please refer to the [*Getting started* guide](https://julia-xai.github.io/XAIDocs/). diff --git a/docs/src/index.md b/docs/src/index.md index 83aec0b..04a0f5e 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -29,7 +29,9 @@ This only requires you to fulfill the following two requirements: Refer to the [`Explanation`](@ref) documentation for a description of the expected fields. For more information, take a look at [`src/XAIBase.jl`](https://github.com/Julia-XAI/XAIBase.jl/blob/main/src/XAIBase.jl). 
-## Example implementation +## Implementation template +Julia-XAI methods typically follow this template: + ```julia struct MyMethod{M} <: AbstractXAIMethod model::M @@ -44,3 +46,5 @@ function (method::MyMethod)(input, output_selector::AbstractNeuronSelector) return Explanation(val, output, output_selection, :MyMethod, :attribution, extras) end ``` + +Refer to the [example implementations](@ref examples) for more information. \ No newline at end of file