See the record of the article from the source Library of Science
@article{IJAMCS_2015_25_3_a12,
     author = {Villacorta, P. J. and Pelta, D. A.},
     title = {A repeated imitation model with dependence between stages: {Decision} strategies and rewards},
     journal = {International Journal of Applied Mathematics and Computer Science},
     pages = {617--630},
     publisher = {mathdoc},
     volume = {25},
     number = {3},
     year = {2015},
     language = {en},
     url = {http://geodesic.mathdoc.fr/item/IJAMCS_2015_25_3_a12/}
}
TY  - JOUR
AU  - Villacorta, P. J.
AU  - Pelta, D. A.
TI  - A repeated imitation model with dependence between stages: Decision strategies and rewards
JO  - International Journal of Applied Mathematics and Computer Science
PY  - 2015
SP  - 617
EP  - 630
VL  - 25
IS  - 3
PB  - mathdoc
UR  - http://geodesic.mathdoc.fr/item/IJAMCS_2015_25_3_a12/
LA  - en
ID  - IJAMCS_2015_25_3_a12
ER  -
%0 Journal Article
%A Villacorta, P. J.
%A Pelta, D. A.
%T A repeated imitation model with dependence between stages: Decision strategies and rewards
%J International Journal of Applied Mathematics and Computer Science
%D 2015
%P 617-630
%V 25
%N 3
%I mathdoc
%U http://geodesic.mathdoc.fr/item/IJAMCS_2015_25_3_a12/
%G en
%F IJAMCS_2015_25_3_a12
Villacorta, P. J.; Pelta, D. A. A repeated imitation model with dependence between stages: Decision strategies and rewards. International Journal of Applied Mathematics and Computer Science, Volume 25 (2015) no. 3, pp. 617-630. http://geodesic.mathdoc.fr/item/IJAMCS_2015_25_3_a12/