@COMMENT This file was generated by bib2html.pl <https://sourceforge.net/projects/bib2html/> version 0.94
@COMMENT written by Patrick Riley <http://sourceforge.net/users/patstg/>
@COMMENT This file came from Kuldeep S. Meel's publication pages at
@COMMENT http://www.comp.nus.edu.sg/~meel/publications/
@inproceedings{NSMIS19,
  title     = {Assessing Heuristic Machine Learning Explanations with Model Counting},
  author    = {Narodytska, Nina and Shrotri, Aditya and Meel, Kuldeep S. and Ignatiev, Alexey and Marques Silva, Joao},
  booktitle = SAT,
  year      = {2019},
  month     = jul,
  bib2html_dl_pdf  = {../Papers/sat19nsmis.pdf},
  bib2html_pubtype = {Refereed Conference},
  bib2html_rescat  = {Formal Methods 4 ML},
  internal-note    = {NOTE(review): booktitle uses the undefined-here @string macro SAT; assumed defined in a shared strings .bib -- verify it is in scope wherever this file is compiled, otherwise BibTeX emits an undefined-macro warning and prints empty text},
  abstract  = {Machine Learning (ML) models are widely used in decision making
    procedures in finance, medicine, education, etc.
    In these areas, ML outcomes can directly affect humans, e.g.\ by
    deciding whether a person should get a loan or be released from
    prison. Therefore, we cannot blindly rely on black box ML models
    and need to explain the decisions made by them.
    This motivated the
    development of a variety of ML-explainer systems, concrete examples
    of which include LIME and its successor ANCHOR.  Due to the heuristic nature of explanations
    produced by existing tools, it is necessary to validate them.
    In this work, we propose a SAT-based method to assess the quality of explanations produced
    by ANCHOR.
    We encode a trained ML model and an explanation for a given
    prediction as a propositional formula.
    Then, by using a state-of-the-art approximate model counter, we
    estimate the quality of the provided explanation as the number of
    solutions supporting it.},
}
