@article{21a936d644d04d77b1e3d19f2e30a2b1,
title = "Identity and the limits of fair assessment",
abstract = "In many assessment problems—aptitude testing, hiring decisions, appraisals of the risk of recidivism, evaluation of the credibility of testimonial sources, and so on—the fair treatment of different groups of individuals is an important goal. But individuals can be legitimately grouped in many different ways. Using a framework and fairness constraints explored in research on algorithmic fairness, I show that eliminating certain forms of bias across groups for one way of classifying individuals can make it impossible to eliminate such bias across groups for another way of dividing people up. And this point generalizes if we require merely that assessments be approximately bias-free. Moreover, even if the fairness constraints are satisfied for some given partitions of the population, the constraints can fail for the coarsest common refinement, that is, the partition generated by taking intersections of the elements of these coarser partitions. This shows that these prominent fairness constraints admit the possibility of forms of intersectional bias.",
keywords = "algorithmic fairness, bias, calibration, equalized odds, intersectionality",
author = "Stewart, {Rush T.}",
note = "Funding Information: Thanks to Marshall Bierson, Mike Bishop, Yang Liu, Michael Nielsen, Ignacio Ojea Quintana, Shanna Slank, Tom Sterkenburg, Reuben Stern, Borut Trpin, audiences at the Center for Advanced Studies (CAS) at LMU Munich and the Faculty of Philosophy at the University of Groningen, three anonymous referees at Social Choice and Welfare, and two anonymous referees at the Journal of Theoretical Politics for helpful conversations and feedback. I am grateful to CAS and Longview Philanthropy for providing research leave, and to the Cambridge-LMU Strategic Partnership for funding the Decision Theory and the Future of Artificial Intelligence group. Funding Information: Thanks to Marshall Bierson, Mike Bishop, Yang Liu, Michael Nielsen, Ignacio Ojea Quintana, Shanna Slank, Tom Sterkenburg, Reuben Stern, Borut Trpin, audiences at the Center for Advanced Studies (CAS) at LMU Munich and the Faculty of Philosophy at the University of Groningen, three anonymous referees at Social Choice and Welfare, and two anonymous referees at the Journal of Theoretical Politics for helpful conversations and feedback. I am grateful to CAS and Longview Philanthropy for providing research leave, and to the Cambridge-LMU Strategic Partnership for funding the Decision Theory and the Future of Artificial Intelligence group. Publisher Copyright: {\textcopyright} The Author(s) 2022.",
year = "2022",
month = jul,
doi = "10.1177/09516298221102972",
language = "English",
volume = "34",
pages = "415--442",
journal = "JOURNAL OF THEORETICAL POLITICS",
issn = "0951-6298",
publisher = "SAGE Publications Ltd",
number = "3",
}