Voir la notice de l'article provenant de la source Library of Science
@article{IJAMCS_2006_16_3_a8,
  author    = {Clempner, J.},
  title     = {Modeling shortest path games with {Petri} nets: {A} {Lyapunov} based theory},
  journal   = {International Journal of Applied Mathematics and Computer Science},
  pages     = {387--397},
  publisher = {mathdoc},
  volume    = {16},
  number    = {3},
  year      = {2006},
  language  = {en},
  url       = {http://geodesic.mathdoc.fr/item/IJAMCS_2006_16_3_a8/}
}
TY - JOUR AU - Clempner, J. TI - Modeling shortest path games with Petri nets: A Lyapunov based theory JO - International Journal of Applied Mathematics and Computer Science PY - 2006 SP - 387 EP - 397 VL - 16 IS - 3 PB - mathdoc UR - http://geodesic.mathdoc.fr/item/IJAMCS_2006_16_3_a8/ LA - en ID - IJAMCS_2006_16_3_a8 ER -
%0 Journal Article %A Clempner, J. %T Modeling shortest path games with Petri nets: A Lyapunov based theory %J International Journal of Applied Mathematics and Computer Science %D 2006 %P 387-397 %V 16 %N 3 %I mathdoc %U http://geodesic.mathdoc.fr/item/IJAMCS_2006_16_3_a8/ %G en %F IJAMCS_2006_16_3_a8
Clempner, J. Modeling shortest path games with Petri nets: A Lyapunov based theory. International Journal of Applied Mathematics and Computer Science, Tome 16 (2006) no. 3, pp. 387-397. http://geodesic.mathdoc.fr/item/IJAMCS_2006_16_3_a8/
[1] Axelrod R. (1984): The Evolution of Cooperation.-New York: Basic Books.
[2] Bellman R.E. (1957): Dynamic Programming. - Princeton: Princeton University Press.
[3] Bertsekas D.P. and Shreve S.E. (1978): Stochastic Optimal Control: The Discrete Time Case. - New York: Academic Press.
[4] Bertsekas D.P. (1987): Dynamic Programming: Deterministic and Stochastic Models. - Englewood Cliffs: Prentice-Hall.
[5] Bertsekas D.P. and Tsitsiklis J.N. (1989): Parallel and Distributed Computation: Numerical Methods. - Englewood Cliffs: Prentice-Hall.
[6] Bertsekas D.P. and Tsitsiklis J.N. (1991): An analysis of stochastic shortest path problems. - Math. Oper. Res., Vol. 16, No. 3, pp. 580-595.
[7] Blackwell D. (1967): Positive dynamic programming. - Proc. 5th Berkeley Symp. Math., Statist., and Probability, Berkeley, California, Vol. 1, pp. 415-418.
[8] Clempner J. (2005): Colored decision process petri nets: modeling, analysis and stability. - Int. J. Appl. Math. Comput. Sci., Vol. 15, No. 3, pp. 405-420.
[9] Clempner J. (2006): Towards modeling the shortest path problem and games with petri nets. - Proc. Doctoral Consortium at 27-th Int. Conf. Application and Theory of Petri Nets and Other Models Of Concurrency, Turku, Finland, pp. 1-12.
[10] Derman C. (1970): Finite State Markovian Decision Processes. -New York: Academic Press.
[11] Dynkin E.B. (1963): The optimum choice of the instant for stopping a Markov process.-Soviet Math. Doklady, Vol. 150, pp. 238-240.
[12] Eaton J.H. and Zadeh L.A. (1962): Optimal pursuit strategies in discrete state probabilistic systems. - Trans. ASME Ser. D, J. Basic Eng., Vol. 84, pp. 23-29.
[13] Grigelionis R.I. and Shiryaev A.N. (1966): On Stefan's problem and optimal stopping rules for Markov processes. - Theory Prob. Applic., Vol. 11, pp. 541-558.
[14] Hernández-Lerma O. and Lasserre J.B. (1996): Discrete-Time Markov Control Process: Basic Optimality Criteria. - Berlin: Springer.
[15] Hernández-Lerma O., Carrasco G. and Pérez-Hernández R. (1999): Markov control processes with the expected total cost criterion: Optimality, stability, and transient models. - Acta Applicandae Mathematicae, Vol. 59, No. 3, pp. 229-269.
[16] Hernández-Lerma O. and Lasserre J.B. (1999): Further Topics on Discrete-Time Markov Control Processes. - Berlin: Springer.
[17] Hinderer K. and Waldmann K.H. (2003): The critical discount factor for finite Markovian decision processes with an absorbing set. - Math. Meth. Oper. Res., Vol. 57, pp. 1-19.
[18] Hinderer K. and Waldmann K.H. (2005): Algorithms for countable state Markov decision model with an absorbing set. -SIAM J. Contr. Optim., Vol. 43, pp. 2109-2131.
[19] Howard R.A. (1960): Dynamic Programming and Markov Processes. -Cambridge, MA: MIT Press.
[20] Kalman R.E. and Bertram J.E. (1960): Control system analysis and design via the "Second Method" of Lyapunov. -- J. Basic Eng., ASME, Vol. 82(D), pp. 371-393.
[21] Kuhn H.W. and Nasar S. (Eds.) (2002): The Essential John Nash. - Princeton: Princeton University Press.
[22] Kumar P.R. and Shiau T.H. (1981): Zero sum dynamic games, In: Control and Dynamic Games, (C.T. Leondes, Ed.) - Academic Press, pp. 1345-1378.
[23] Kushner H.J. and Chamberlain S.G. (1969): Finite state stochastic games: Existence theorems and computational procedures. IEEE Trans. Automat. Contr., Vol. 14, No. 3.
[24] Kushner H. (1971): Introduction to Stochastic Control. - New York: Holt, Rinehart and Winston.
[25] Lakshmikantham V., Leela S. and Martynyuk A.A. (1990): Practical Stability of Nonlinear Systems. - Singapore: World Scientific.
[26] Lakshmikantham V., Matrosov V.M. and Sivasundaram S. (1991): Vector Lyapunov Functions and Stability Analysis of Nonlinear Systems. -Dordrecht: Kluwer.
[27] Mandl P. and Seneta E. (1969): The theory of non-negative matrices in a dynamic programming problem. - Austral. J.Stat., Vol. 11, pp. 85-96.
[28] Nash J. (1951): Non-cooperative games.-Ann. Math., Vol. 54, pp. 287-295.
[29] Nash J. (1996): Essays on Game Theory. -Cheltenham: Elgar.
[30] Pallu de la Barriere R. (1967): Optimal Control Theory. - Philadelphia: Saunders.
[31] Patek S.D. (1997): Stochastic Shortest Path Games: Theory and Algorithms. - Ph.D. thesis, Dep. Electr. Eng. Comput. Sci., MIT, Cambridge, MA.
[32] Patek S.D. and Bertsekas D.P. (1999): Stochastic shortest path games. - SIAM J. Contr. Optim., Vol. 37, No. 3, pp. 804-824.
[33] Patek S.D. (2001): On terminating Markov decision processes with a risk averse objective function. - Automatica, Vol. 37, No. 9, pp. 1379-1386.
[34] Pliska S.R. (1978): On the transient case for Markov decision chains with general state space, In: Dynamic Programming and Its Applications, (M.L. Puterman, Ed.). - New York: Springer, pp 335-349.
[35] Puterman M.L. (1994): Markov Decision Processes: Discrete Stochastic Dynamic Programming. -New York: Wiley.
[36] Rieder U. (1975): Bayesian dynamic programming. - Adv. Appl. Prob., Vol. 7, pp. 330-348.
[37] Shapley L.S. (1953): Stochastic Games. - Proc. National. Acad. Sci., Mathematics, Vol. 39, pp. 1095-1100.
[38] Shiryaev A.N. (1978): Optimal Stopping Problems. - New York: Springer.
[39] Strauch R. (1966): Negative dynamic programming. - Ann. Math. Stat., Vol. 37, pp. 871-890.
[40] Van der Wal J. (1981): Stochastic dynamic programming. - Math. Centre Tracts 139, Mathematisch Centrum, Amsterdam.
[41] Veinott A.F., Jr. (1969): Discrete dynamic programming with sensitive discount optimality criteria. - Ann. Math. Stat., Vol. 40, No. 5, pp. 1635-1660.
[42] Whittle P. (1983): Optimization over Time.-New York: Wiley, Vol. 2.