rebuilding site Sat May 25 13:51:14 MPST 2024
AL-JiongYang committed May 25, 2024
1 parent 908f12a commit 447721c
Showing 7 changed files with 126 additions and 0 deletions.
12 changes: 12 additions & 0 deletions content/publication/AAAI23frp/cite.bib
@@ -0,0 +1,12 @@
@inproceedings{AAAI23frp,
author = {Xuanxiang Huang and
Yacine Izza and
Joao Marques{-}Silva},
title = {Solving Explainability Queries with Quantification: The Case of Feature
Relevancy},
booktitle = {Thirty-Seventh {AAAI} Conference on Artificial Intelligence, {AAAI}
2023},
pages = {3996--4006},
publisher = {{AAAI} Press},
year = {2023}
}
18 changes: 18 additions & 0 deletions content/publication/AAAI23frp/index.md
@@ -0,0 +1,18 @@
---
abstract: "Trustable explanations of machine learning (ML) models are vital in high-risk uses of artificial intelligence (AI). Apart from the computation of trustable explanations, a number of explainability queries have been identified and studied in recent work. Some of these queries involve solving quantification problems, either in propositional or in more expressive logics. This paper investigates one of these quantification problems, namely the feature relevancy problem (FRP), i.e.\ to decide whether a (possibly sensitive) feature can occur in some explanation of a prediction. In contrast with earlier work, that studied FRP for specific classifiers, this paper proposes a novel algorithm for the FRP quantification problem which is applicable to any ML classifier that meets minor requirements. Furthermore, the paper shows that the novel algorithm is efficient in practice. The experimental results, obtained using random forests (RFs) induced from well-known publicly available datasets, demonstrate that the proposed solution outperforms existing state-of-the-art solvers for Quantified Boolean Formulas (QBF) by orders of magnitude. Finally, the paper also identifies a novel family of formulas that are challenging for currently state-of-the-art QBF solvers."

authors:
- Xuanxiang Huang
- Yacine Izza
- Joao Marques-Silva
date: 2023-02-01 00:00:00
highlight: true
image_preview: ''
math: true
publication: In *Proceedings of the Thirty-Seventh AAAI Conference on Artificial Intelligence (AAAI)*
publication_types:
- '1'
selected: true
title: 'Solving Explainability Queries with Quantification: The Case of Feature Relevancy'
url_pdf: https://ojs.aaai.org/index.php/AAAI/article/view/25514
---
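
The abstract above defines the feature relevancy problem (FRP): decide whether a given feature occurs in at least one abductive explanation of a prediction. To make the query itself concrete, here is a minimal Python sketch (not the paper's quantification-based algorithm); the toy Boolean classifier, the instance, and the helper names are all invented for illustration, and the enumeration is brute force.

```python
from itertools import product, combinations

# Toy Boolean classifier: f(x1, x2, x3) = x1 OR (x2 AND x3).
def f(x):
    return x[0] or (x[1] and x[2])

v = (1, 1, 1)          # instance being explained
pred = f(v)            # prediction to explain

def entails(S):
    """True if fixing the features in S to their values in v forces the
    prediction for every completion of the remaining (free) features."""
    free = [i for i in range(3) if i not in S]
    for vals in product([0, 1], repeat=len(free)):
        x = list(v)
        for i, b in zip(free, vals):
            x[i] = b
        if f(tuple(x)) != pred:
            return False
    return True

# All subset-minimal abductive explanations (AXps), by brute force:
# iterate by increasing size and skip any superset of a known AXp.
axps = []
for k in range(4):
    for S in combinations(range(3), k):
        if entails(S) and not any(set(a) <= set(S) for a in axps):
            axps.append(S)

# FRP query: is feature t relevant, i.e. does it occur in some AXp?
t = 1
print(axps)                       # [(0,), (1, 2)]
print(any(t in S for S in axps))  # True: feature 1 is relevant
```

The brute-force enumeration above is exponential and only workable for a handful of features; the paper's contribution is precisely to answer the relevancy query without such enumeration.
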
13 changes: 13 additions & 0 deletions content/publication/IJAR23/cite.bib
@@ -0,0 +1,13 @@
@article{ijar23,
author = {Yacine Izza and
Xuanxiang Huang and
Alexey Ignatiev and
Nina Narodytska and
Martin C. Cooper and
Joao Marques{-}Silva},
title = {On computing probabilistic abductive explanations},
journal = {Int. J. Approx. Reason.},
volume = {159},
pages = {108939},
year = {2023}
}
21 changes: 21 additions & 0 deletions content/publication/IJAR23/index.md
@@ -0,0 +1,21 @@
---
abstract: 'The most widely studied explainable AI (XAI) approaches are unsound. This is the case with well-known model-agnostic explanation approaches, and it is also the case with approaches based on saliency maps. One solution is to consider intrinsic interpretability, which does not exhibit the drawback of unsoundness. Unfortunately, intrinsic interpretability can display unwieldy explanation redundancy. Formal explainability represents the alternative to these non-rigorous approaches, with one example being PI-explanations. Unfortunately, PI-explanations also exhibit important drawbacks, the most visible of which is arguably their size. Recently, it has been observed that the (absolute) rigor of PI-explanations can be traded off for a smaller explanation size, by computing the so-called relevant sets. Given some positive $\delta$, a set S of features is $\delta$-relevant if, when the features in S are fixed, the probability of getting the target class exceeds $\delta$. However, even for very simple classifiers, the complexity of computing relevant sets of features is prohibitive, with the decision problem being $\mathrm{NP}^{\mathrm{PP}}$-complete for circuit-based classifiers. In contrast with earlier negative results, this paper investigates practical approaches for computing relevant sets for a number of widely used classifiers that include Decision Trees (DTs), Naive Bayes Classifiers (NBCs), and several families of classifiers obtained from propositional languages. Moreover, the paper shows that, in practice, and for these families of classifiers, relevant sets are easy to compute. Furthermore, the experiments confirm that succinct sets of relevant features can be obtained for the families of classifiers considered.'

authors:
- Yacine Izza
- Xuanxiang Huang
- Alexey Ignatiev
- Nina Narodytska
- Martin C. Cooper
- Joao Marques-Silva
date: 2023-04-01 00:00:00
highlight: true
image_preview: ''
math: true
publication: In *International Journal of Approximate Reasoning (IJAR)*
publication_types:
- '2'
selected: true
title: 'On computing probabilistic abductive explanations'
url_pdf: https://www.sciencedirect.com/science/article/abs/pii/S0888613X23000701?via%3Dihub
---
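
To make the delta-relevancy definition in the abstract above concrete: a set S is delta-relevant if fixing the features in S to the instance's values makes the probability of the target class exceed delta. The small sketch below computes that probability exactly for a toy Boolean classifier under a uniform distribution on the free features; it is an illustration only, not one of the paper's algorithms, and all names are invented.

```python
from itertools import product

# Toy Boolean classifier and instance; free features are uniform over {0, 1}.
def f(x):
    return x[0] or (x[1] and x[2])

v = (1, 1, 1)
target = f(v)

def prob_target(S):
    """Pr[f(x) = target] when the features in S are fixed to their
    values in v and the remaining features are drawn uniformly."""
    free = [i for i in range(3) if i not in S]
    hits = 0
    for vals in product([0, 1], repeat=len(free)):
        x = list(v)
        for i, b in zip(free, vals):
            x[i] = b
        hits += (f(tuple(x)) == target)
    return hits / 2 ** len(free)

def is_delta_relevant(S, delta):
    return prob_target(S) > delta

print(prob_target({1, 2}))          # 1.0 -> delta-relevant for any delta < 1
print(is_delta_relevant({1}, 0.9))  # Pr = 0.75 -> False
```

Exact enumeration like this is only feasible for tiny feature spaces; the paper's point is that for DTs, NBCs, and several propositional-language classifiers the computation can nonetheless be done efficiently in practice.
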
6 changes: 6 additions & 0 deletions content/publication/IJCAI23jt/cite.bib
@@ -0,0 +1,6 @@
@inproceedings{IJCAI23jt,
  author={Yacine Izza and Alexey Ignatiev and Joao Marques{-}Silva},
title={On Tackling Explanation Redundancy in Decision Trees},
booktitle={Proceedings of the International Joint Conference on Artificial Intelligence, {IJCAI}},
year={2023}
}
17 changes: 17 additions & 0 deletions content/publication/IJCAI23jt/index.md
@@ -0,0 +1,17 @@
---
abstract: "Claims about the interpretability of decision trees can be traced back to the origins of machine learning (ML). Indeed, given some input consistent with a decision tree's path, the explanation for the resulting prediction consists of the features in that path. Moreover, a growing number of works propose the use of decision trees, and of other so-called interpretable models, as a possible solution for deploying ML models in high-risk applications. This paper overviews recent theoretical and practical results which demonstrate that for most decision trees, tree paths exhibit so-called explanation redundancy, in that logically sound explanations can often be significantly more succinct than what the features in the path dictates. More importantly, such decision tree explanations can be computed in polynomial-time, and so can be produced with essentially no effort other than traversing the decision tree. The experimental results, obtained on a large range of publicly available decision trees, support the paper's claims."

authors:
- Yacine Izza
- Alexey Ignatiev
- Joao Marques-Silva
date: 2023-05-01 00:00:00
highlight: true
image_preview: ''
math: true
publication: In *Proceedings of the International Joint Conference on Artificial Intelligence (IJCAI)*
publication_types:
- '1'
selected: true
title: 'On Tackling Explanation Redundancy in Decision Trees'
---
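
The abstract above claims that a decision tree path often contains redundant features, and that a succinct, logically sound explanation can be extracted cheaply. The sketch below illustrates the idea on an invented toy tree with a deletion-based loop: drop a path feature and keep the drop if the prediction is still entailed. The entailment check here is brute force for readability; for decision trees such a check can be done in polynomial time, which is what makes the overall procedure cheap.

```python
from itertools import product

# Toy decision tree that computes f(x) = x3 but tests x1 and x2 on every
# path. Internal nodes are (feature_index, left, right); leaves are labels.
leaf3 = (2, 0, 1)                            # tests x3: 0 -> class 0, 1 -> class 1
tree = (0, (1, leaf3, leaf3), (1, leaf3, leaf3))

def predict(tree, x):
    while isinstance(tree, tuple):
        feat, left, right = tree
        tree = right if x[feat] else left
    return tree

v = (1, 1, 1)
pred = predict(tree, v)
path = [0, 1, 2]                             # features tested on v's path

def entails(S):
    """Does fixing the features in S to v's values force the prediction
    for every completion of the remaining features?"""
    free = [i for i in range(3) if i not in S]
    for vals in product([0, 1], repeat=len(free)):
        x = list(v)
        for i, b in zip(free, vals):
            x[i] = b
        if predict(tree, tuple(x)) != pred:
            return False
    return True

# Deletion-based reduction of the path explanation: a linear number of
# entailment checks. Here each check is brute force for clarity; for a
# decision tree it can be performed in polynomial time.
expl = list(path)
for feat in path:
    trial = [i for i in expl if i != feat]
    if entails(set(trial)):
        expl = trial

print(expl)                                  # [2]: only x3 is needed
```
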
39 changes: 39 additions & 0 deletions content/talk/19092023.md
@@ -0,0 +1,39 @@
+++
date = 2023-09-19T00:00:00 # Schedule page publish date.

title = '<b><a href="https://arxiv.org/pdf/2206.00667.pdf">Our paper</a> on explaining the sources of bias in machine learning via influence functions has been accepted at FAccT 2023.</b> Authors: Bishwamittra Ghosh, Debabrota Basu and Kuldeep S. Meel. <br> We combine explainability with fairness in machine learning: we compute the influence of individual features, and the intersectional effect of multiple features, on the resulting bias of a classifier on a dataset. This yields a more fine-grained depiction of the sources of bias than earlier methods.'
time_start = 2023-09-19T00:00:00
#time_end = 2030-06-01T15:00:00
abstract = ""
abstract_short = ""
event = ""
event_url = ""
location = ""

# Is this a selected talk? (true/false)
selected = false

# Projects (optional).
# Associate this talk with one or more of your projects.
# Simply enter the filename (excluding '.md') of your project file in `content/project/`.
projects = []

# Links (optional).
url_pdf = ""
url_slides = ""
url_video = ""
url_code = ""

# Does the content use math formatting?
math = true

# Does the content use source code highlighting?
highlight = true

# Featured image
# Place your image in the `static/img/` folder and reference its filename below, e.g. `image = "example.jpg"`.
[header]
image = ""
caption = ""

+++
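
As a rough illustration of what "the influence of individual features, and the intersectional effect of multiple features, on the resulting bias of a classifier" means, here is a toy Python sketch on synthetic data. It uses a naive permutation proxy (shuffle a feature and measure how much a statistical-parity bias measure drops), which is not the fairness-influence-function method of the paper; the data, the classifier, and all names are invented for the example.

```python
import numpy as np

rng = np.random.default_rng(0)

# Synthetic data: a protected group column and two feature columns,
# one of which is correlated with the group.
n = 10_000
group = rng.integers(0, 2, n)
x1 = rng.integers(0, 2, n)                              # independent of group
x2 = (rng.random(n) < 0.3 + 0.4 * group).astype(int)    # correlated with group

def classifier(x1, x2):
    # Fixed hand-written rule standing in for a trained model.
    return ((x1 + x2) >= 1).astype(int)

def statistical_parity_difference(yhat, group):
    """|Pr[yhat = 1 | group = 1] - Pr[yhat = 1 | group = 0]|."""
    return abs(yhat[group == 1].mean() - yhat[group == 0].mean())

base_bias = statistical_parity_difference(classifier(x1, x2), group)

def bias_drop(shuffle_x1=False, shuffle_x2=False):
    """Drop in bias when the selected features are replaced by
    independent shuffles of themselves (a crude influence proxy)."""
    a = rng.permutation(x1) if shuffle_x1 else x1
    b = rng.permutation(x2) if shuffle_x2 else x2
    return base_bias - statistical_parity_difference(classifier(a, b), group)

print(base_bias)                                     # about 0.20
print(bias_drop(shuffle_x1=True))                    # proxy for x1: about 0
print(bias_drop(shuffle_x2=True))                    # proxy for x2: large
print(bias_drop(shuffle_x1=True, shuffle_x2=True))   # joint effect of both
```

The permutation proxy only hints at per-feature and joint contributions; the paper's influence-function approach attributes bias in a principled way rather than by ad hoc shuffling.
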
