% NOTE(review): key 1508622 kept as-is so existing \cite{1508622} calls still work.
% Fixed: @conference -> @inproceedings, "Multidiciplinary" typo, garbled
% {\textasciiacute}onez -> Ord{\'o}{\~n}ez, and normalized all names to "Last, First".
@inproceedings{1508622,
  title     = {Robust Solutions in {Stackelberg} Games: Addressing Boundedly Rational Human Preference Models},
  booktitle = {Association for the Advancement of Artificial Intelligence 4th Multidisciplinary Workshop on Advances in Preference Handling},
  year      = {2008},
  author    = {Jain, Manish and Ord{\'o}{\~n}ez, Fernando and Pita, James and Portway, Christopher and Tambe, Milind and Western, Craig and Paruchuri, Praveen and Kraus, Sarit},
  abstract  = {Stackelberg games represent an important class of games in which one player, the leader, commits to a strategy and the remaining players, the followers, make their decision with knowledge of the leader{\textquoteright}s commitment. Existing algorithms for Bayesian Stackelberg games find optimal solutions while modeling uncertainty over follower types with an a-priori probability distribution. Unfortunately, in real-world applications, the leader may also face uncertainty over the follower{\textquoteright}s response which makes the optimality guarantees of these algorithms fail. Such uncertainty arises because the follower{\textquoteright}s specific preferences or the follower{\textquoteright}s observations of the leader{\textquoteright}s strategy may not align with the rational strategy, and it is not amenable to a-priori probability distributions. These conditions especially hold when dealing with human subjects. To address these uncertainties while providing quality guarantees, we propose three new robust algorithms based on mixed-integer linear programs (MILPs) for Bayesian Stackelberg games. A key result of this paper is a detailed experimental analysis that demonstrates that these new MILPs deal better with human responses: a conclusion based on 800 games with 57 human subjects as followers. We also provide run-time results on these MILPs.},
}