From 4fd1595797089b1281583329a658ff0f39e43f60 Mon Sep 17 00:00:00 2001 From: "Olivier @ CREATIS" Date: Tue, 29 Oct 2024 11:00:28 +0100 Subject: [PATCH] Update collections/_posts/2024-10-20-tabular-explainability.md Co-authored-by: Nathan Painchaud <23144457+nathanpainchaud@users.noreply.github.com> --- collections/_posts/2024-10-20-tabular-explainability.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/collections/_posts/2024-10-20-tabular-explainability.md b/collections/_posts/2024-10-20-tabular-explainability.md index 526e91bb..26951878 100755 --- a/collections/_posts/2024-10-20-tabular-explainability.md +++ b/collections/_posts/2024-10-20-tabular-explainability.md @@ -18,7 +18,7 @@ pdf: "https://arxiv.org/pdf/2302.14278" # Highlights * Investigate explainable models based on transformers for tabular data -* Use of knowledge distillation (master/student) to train a single head but multi-layers (blocs) transformer to facilitate explicability analysis +* Use of knowledge distillation (master/student) to train a single head but multi-layer (blocks) transformer to facilitate explainability analysis * Propose a graph-oriented explainability method based on the set of single head attention matrices * Compare this approach to attention-, gradient-, and perturbation-based explainability methods