% Journal article; cleaned from publisher auto-export (escaped \&, bare DOI,
% "Last, First" author form, {\l} for classic-BibTeX-safe "Przybyła").
@article{PRZYBYLA2021102653,
  author   = {Przyby{\l}a, Piotr and Soto, Axel J.},
  title    = {When Classification Accuracy Is Not Enough: Explaining News Credibility Assessment},
  journal  = {Information Processing \& Management},
  volume   = {58},
  number   = {5},
  pages    = {102653},
  year     = {2021},
  issn     = {0306-4573},
  doi      = {10.1016/j.ipm.2021.102653},
  url      = {https://www.sciencedirect.com/science/article/pii/S0306457321001412},
  keywords = {Visual analytics, Credibility, Text classification, Fake news, Natural language processing},
  abstract = {Dubious credibility of online news has become a major problem with negative consequences for both readers and the whole society. Despite several efforts in the development of automatic methods for measuring credibility in news stories, there has been little previous work focusing on providing explanations that go beyond a black-box decision or score. In this work, we use two machine learning approaches for computing a credibility score for any given news story: one is a linear method trained on stylometric features and the other one is a recurrent neural network. Our goal is to study whether we can explain the rationale behind these automatic methods and improve a reader's confidence in their credibility assessment. Therefore, we first adapted the classifiers to the constraints of a browser extension so that the text can be analysed while browsing online news. We also propose a set of interactive visualisations to explain to the user the rationale behind the automatic credibility assessment. We evaluated our adapted methods by means of standard machine learning performance metrics and through two user studies. The adapted neural classifier showed better performance on the test data than the stylometric classifier, despite the latter appearing to be easier to interpret by the participants. Also, users were significantly more accurate in their assessment after they interacted with the tool as well as more confident with their decisions.},
}