\documentclass[12pt]{article}
\input{../../../../etc/macros}
\input{../../../../etc/mzlatex_macros}
%% \input{../../../../etc/pdf_macros}
\bibliographystyle{plain}
\begin{document}
\myheader \mytitle
\hr
\sectiontitle{Ruin Probabilities}
\hr
\usefirefox
\hr
\visual{Rating}{../../../../CommonInformation/Lessons/rating.png}
\section*{Rating} %one of
% Everyone: contains no mathematics.
% Student: contains scenes of mild algebra or calculus that may require guidance.
Mathematically Mature: may contain mathematics beyond calculus with
proofs. % Mathematicians Only: prolonged scenes of intense rigor.
\hr
\visual{QuestionofDay}{../../../../CommonInformation/Lessons/question_mark.png}
\section*{Question of the Day}
What is the solution of the equation \( x_n = a x_{n-1} \) where \( a \)
is a constant? What kind of a function is the solution? What more, if
anything, needs to be known to obtain a complete solution?
\hr
\visual{Key Concepts}{../../../../CommonInformation/Lessons/keyconcepts.png}
\section*{Key Concepts}
\begin{enumerate}
\item
The probabilities, interpretation, meaning, and consequences of
the ``gambler's ruin''.%
\index{gambler's ruin}
\end{enumerate}
\hr
\visual{Vocabulary}{../../../../CommonInformation/Lessons/vocabulary.png}
\section*{Vocabulary}
\begin{enumerate}
\item
\defn{Classical Ruin Problem} ``Consider the familiar gambler
who wins or loses a dollar with probabilities \( p \) and \( q =
1-p \), respectively, playing against an infinitely rich
adversary who is always willing to play although the gambler has
the privilege of stopping at his pleasure. The gambler adopts
the strategy of playing until he either loses his capital (``is
ruined'') or increases it to \( a \) (with a net gain of \( a -
T_0 \).) We are interested in the probability of the gambler's
ruin and the probability distribution of the duration of the
game. This is the \defn{classical ruin problem}.'' (From W.
Feller, in \emph{Introduction to Probability Theory and
Applications, Volume I}, Chapter XIV, page 342.
\cite{feller73})
\end{enumerate}
\hr
\visual{Mathematical Ideas}{../../../../CommonInformation/Lessons/mathematicalideas.png}
\section*{Mathematical Ideas}
\subsection*{Understanding a Stochastic Process}
We consider a sequence of Bernoulli random variables, \( Y_1, Y_2, Y_3,
\dots \) where \( Y_i = +1 \) with probability \( p \) and \( Y_i = -1 \)
with probability \( q \). We start with an initial value \( T_0 \). We
define the sequence of sums \( T_n = T_0 + \sum_{i=1}^n Y_i \). We are
interested in the stochastic process \( T_1, T_2, T_3, \dots \). It
turns out this is a complicated sequence to understand in full, so we
single out particular simpler features to understand first. For
example, we can look at the probability that the process will achieve
the value \( 0 \) before it achieves the value \( a \). This is a
special case of a larger class of probability problems called \emph{first-passage
probabilities.}%
\index{first-passage probabilities}
\subsection*{Theorems about Ruin Probabilities }
Consider a gambler who wins or loses a dollar on each turn of a game
with probabilities \( p \) and \( q=1-p \) respectively. Let his
initial capital be \( T_0 \). The game continues until the gambler's
capital either is reduced to \( 0 \) or has increased to \( a \). Let \(
q_{T_0} \) be the probability of the gambler's ultimate ruin and \( p_{T_0}
\) the probability of his winning. We shall show later that (see also
\link{http://www.math.unl.edu/~sdunbar1/MathematicalFinance/Lessons/CoinTossing/Duration/duration.xml}
{Duration of the Game Until Ruin}.)
\[
p_{T_0} + q_{T_0} =1
\]
so that we need not consider the possibility of an unending game.
\begin{theorem}
The probability of the gambler's ruin is
\[
q_{T_0} = \frac{(q/p)^a - (q/p)^{T_0}}{ (q/p)^a - 1 }
\]
if \( p \ne q \) and
\[
q_{T_0} = 1 - T_0/a
\]
if \( p = q = 1/2 \).
\end{theorem}
\begin{proof}
After the first trial the gambler's fortune is either \( T_0 - 1 \)
or \( T_0 + 1 \) and therefore we must have
\begin{equation}
q_{T_0} = p q_{T_0+1} + q q_{T_0-1}%
\label{eqnone}
\end{equation}
provided \( 1 < T_0 < a -1 \). For \( T_0 = 1 \), the first trial
may lead to ruin, and \eqref{eqnone} is replaced by
\[
q_1 = p q_2 + q.
\]
Similarly, for \( T_0 = a-1 \) the first trial may result in
victory, and therefore
\[
q_{a-1} = q q_{a-2}.
\]
To unify our equations, we define as a natural convention that \( q_0
= 1 \), and \( q_a = 0 \). Then the probability of ruin satisfies
\eqref{eqnone} for \( T_0 = 1,2, \ldots, a-1 \). This defines a set
of \( a-1 \) difference equations, with boundary conditions at \( 0 \)
and \( a \). If we solve the system of difference equations, then
we will have the desired probability \( q_{T_0} \) for any value of \(
T_{0} \).%
\index{difference equations}
Note that we can rewrite the difference equations as
\[
p q_{T_0} + q q_{T_0} = p q_{T_0 + 1} + q q_{T_0 - 1}.
\]
Then we can rearrange and factor to obtain
\[
\frac{ q_{T_0 + 1} - q_{T_0}}{q_{T_0} - q_{T_0 - 1}} = \frac{q}{p}
\]
This says the ratio of successive differences of \( q_{T_0} \) is
constant. This suggests that \( q_{T_0} \) is a power function,
\[
q_{T_0} = \lambda^{T_0}
\]
since power functions have this property.
We first take the case when \( p \ne q \). Then based on the guess
above (or also on standard theory for linear difference equations),
we try a solution of the form \( q_{T_0} = \lambda^{T_0} \). That
is
\[
\lambda^{T_0} = p \lambda^{T_0 + 1} + q \lambda^{T_0 - 1}.
\]
This reduces to
\[
p \lambda^2 - \lambda + q = 0.
\]
Since \( p+q = 1 \), this factors as
\[
(p \lambda - q) (\lambda -1) = 0,
\]
so the solutions are \( \lambda = q/p \), and \( \lambda = 1 \). (One
could also use the quadratic formula to obtain the same values, of
course.) Again by the standard theory of linear difference
equations, the general solution is%
\index{difference equations!general solution}
\begin{equation}
q_{T_0} = A \cdot 1 + B \cdot (q/p)^{T_0}%
\label{eqntwo}
\end{equation}
for some constants \( A \), and \( B \).
Now we determine the constants by using the boundary conditions:
\begin{align*}
q_0 &= A + B = 1 \\
q_a &= A + B (q/p)^a = 0.
\end{align*}
Solving, substituting, and simplifying:
\[
q_{T_0} = \frac{(q/p)^a - (q/p)^{T_0}}{ (q/p)^a - 1 }.
\]
(Check for yourself that with this expression \( 0 \le q_{T_0} \le 1
\) as it should be for a probability.)
We should show that the solution is unique. So suppose \( r_{T_0} \)
is another solution of the difference equations. Given an arbitrary
solution of \eqref{eqnone}, the two constants \( A \) and \( B \)
can be determined so that \eqref{eqntwo} agrees with \( r_{T_0} \)
at \( T_0 =0 \) and \( T_0 = a \). (The reader should be able to
explain why by reference to a theorem in Linear Algebra!) From these
two values, all other values can be found by substituting in \eqref{eqnone}
successively \( T_0 = 1,2,3,\ldots \) Therefore, two solutions which
agree for \( T_0 = 0 \) and \( T_0 =1 \) are identical, hence every
solution is of the form \eqref{eqntwo}.
The solution breaks down if \( p = q = 1/2 \), since then we do not
get two linearly independent solutions of the difference equation (we
get the solution \( 1 \) repeated twice). Instead, we need to
borrow a result from differential equations (from the
variation-of-parameters/reduction-of-order set of ideas used to
derive a complete linearly independent set of solutions.) Certainly,
\( 1 \) is still a solution of the difference equation \eqref{eqnone}.
A second linearly independent solution is \( T_0 \), (check it out!)
and the general solution is \( q_{T_0} = A + B T_0 \). To satisfy
the boundary conditions, we must put \( A = 1 \), and \( A + B a = 0
\), hence \( q_{T_0} = 1 - T_0/a \).
\end{proof}
We can consider a symmetric interpretation of this gambling game.
Instead of a single gambler playing at a casino, trying to make a goal \(
a \) before being ruined, consider two gamblers Alice and Bill playing
against each other. Let Alice's initial capital be \( T_0 \) and let
her play against adversary Bill with initial capital \( a-T_0 \) so that
their combined capital is \( a \). The game continues until one
gambler's capital either is reduced to zero or has increased to \( a \),
that is, until one of the two players is ruined.
\begin{corollary}
\( p_{T_0} + q_{T_0} = 1 \)
\end{corollary}
\begin{proof}
The probability \( p_{T_0} \) of Alice's winning the game equals the
probability of Bill's ruin. Bill's ruin (and Alice's victory) is
therefore obtained from our ruin formulas on replacing \( p \), \( q
\), and \( T_0 \) by \( q \), \( p \), and \( a-T_0 \) respectively.
That is, from our formula (for \( p \ne q \)) the probability of
Alice's ruin is
\[
q_{T_0} = \frac{(q/p)^a - (q/p)^{T_0}}{ (q/p)^a - 1}
\]
and the probability of Bill's ruin is
\[
p_{T_0} = \frac{ (p/q)^a - (p/q)^{a-T_0}}{ (p/q)^a - 1 }.
\]
Then add these together, and after some algebra, the total is \( 1 \).
(Check it out!)
For \( p =1/2 = q \), the proof is simpler, since then \( p_{T_0} =
1 - (a-{T_0})/a \), and \( q_{T_0} = 1 - T_0/a \), and \( p_{T_0} +
q_{T_0} = 1 \) easily.
\end{proof}
\begin{corollary}
The expected gain against the infinitely rich adversary is \( \E{G}
= (1 - q_{T_0})a - T_0 \).
\end{corollary}
\begin{proof}
In the game against the infinitely rich adversary, the gambler's
ultimate gain (or loss!) is a Bernoulli (two-valued) random
variable, \( G \), where \( G \) assumes the value \( -{T_0} \) with
probability \( q_{T_0} \), and assumes the value \( a-{T_0} \) with
probability \( p_{T_0} \). Thus the expected value is
\begin{align*}
\E{G} &= (a-{T_0}) p_{T_0} + (-{T_0}) q_{T_0} \\
&= p_{T_0} a - {T_0} \\
&= (1-q_{T_0}) a - {T_0}.
\end{align*}
\end{proof}
Now notice that if \( q = 1/2 = p \), so that we are dealing with a fair
game, then \( \E{G} = ( 1 - (1 - {T_0}/a)) \cdot a - {T_0} = 0 \). That
is, a fair game in the short run is a fair game in the long run.
However, if \( p < 1/2 < q \), so that the game is not fair, then our
expectation formula says
\begin{align*}
\E{G} &= \left( 1 - \frac{ (q/p)^a-(q/p)^{T_0}}{(q/p)^a-1} \right) a
- {T_0} \\
&= \frac{ (q/p)^{T_0} - 1 }{ (q/p)^a - 1 } a - {T_0}\\
& = \left( \frac{ [ (q/p)^{T_0} - 1] a }{ [(q/p)^a - 1] T_0 } - 1
\right) T_0
\end{align*}
The sequence \( [(q/p)^n - 1]/n \) is an increasing sequence, so for
\( 0 < T_0 < a \),
\[
\left( \frac{ [ (q/p)^{T_0} - 1] a }{ [(q/p)^a - 1] T_0 } - 1 \right)
< 0.
\]
\begin{remark}
An unfair game in the short run is an unfair game in the long run.
\end{remark}
\begin{corollary}
The probability of ultimate ruin of a gambler with initial capital \(
{T_0} \) playing against an infinitely rich adversary is
\[
q_{T_0} = 1, \qquad p \le q
\]
and
\[
q_{T_0} = (q/p)^{T_0}, \qquad p > q.
\]
\end{corollary}
\begin{proof}
Let \( a \to \infty \) in the formulas. (Check it out!)
\end{proof}
\begin{remark}
This corollary says that the probability of ``breaking the bank at
Monte Carlo'' as in the movies is zero, at least for the simple
games we are considering.
\end{remark}
%% In a random walk starting at
%% the origin the probability of ever reaching the position {T_0}
%% > 0 equals 1 if p >= q and equals (p/q)^{T_0} when p <
%% q.
\subsection*{Some Calculations for Illustration}
\begin{tabular}{||r|r|l|l|r|r|l||}
\hline
\( p \) & \( q \) & \( T_0 \) & \( a \) & Probability & Probability & Expected \\
& & & & of Ruin & of Victory & Gain \\
\hline
0.5 & 0.5 & 9 & 10 & 0.1000 & 0.9000 & 0 \\
0.5 & 0.5 & 90 & 100 & 0.1000 & 0.9000 & 0 \\
0.5 & 0.5 & 900 & 1,000 & 0.1000 & 0.9000 & 0 \\
0.5 & 0.5 & 950 & 1,000 & 0.0500 & 0.9500 & 0 \\
0.5 & 0.5 & 8,000 & 10,000 & 0.2000 & 0.8000 & 0 \\
\hline
0.45 & 0.55 & 9 & 10 & 0.2101 & 0.7899 & -1 \\
0.45 & 0.55 & 90 & 100 & 0.8656 & 0.1344 & -77 \\
0.45 & 0.55 & 99 & 100 & 0.1818 & 0.8182 & -17 \\
0.4 & 0.6 & 90 & 100 & 0.9827 & 0.0173 & -88 \\
0.4 & 0.6 & 99 & 100 & 0.3333 & 0.6667 & -32 \\
\hline
\end{tabular}
\subsection*{Why do we hear about people who actually win?}
We often hear from people who consistently win at the casino. How can
this be in the face of the theorems above?
A simple illustration makes clear how this is possible. Assume for
convenience a gambler who repeatedly visits the casino, each time with a
certain amount of capital. His goal is to win 1/9 of his capital. That
is, in units of his initial capital \( {T_0} = 9 \), and \( a = 10 \).
Assume too that the casino is fair so that \( p = 1/2 = q \), then the
probability of ruin in any one year is:
\[
q_{T_0} = 1 - 9/10 = 1/10.
\]
That is, if the working capital is much greater than the amount
required for victory, then the probability of ruin is reasonably small.
Then the probability of an unbroken string of ten successes in ten years
is:
\[
(1 - 1/10)^{10} \approx \exp(-1) \approx 0.37
\]
This much success is reasonable, but simple psychology would suggest the
gambler would boast about his skill instead of crediting it to luck.
Moreover, simple psychology suggests the gambler would also blame one
failure on oversight, momentary distraction, or even cheating by the
casino!
\subsection*{Another Interpretation as a Random Walk}
Another common interpretation of this probability game is to imagine it
as a \defn{random walk}.%
\index{random walk}
That is, we imagine an individual on a number line, starting at some
position \( T_0 \). The person takes a step to the right to \( T_0 +1 \)
with probability \( p \) and takes a step to the left to \( T_0-1 \)
with probability \( q \) and continues this random process. Then
instead of the total fortune at any time, we consider the geometric
position on the line at any time. Instead of reaching financial ruin or
attaining a financial goal, we talk instead about reaching or passing a
certain position. For example, Corollary 3 says that if \( p \le q \),
then the probability of visiting the origin before going to infinity is \(
1 \). The two interpretations are equivalent and either can be used
depending on which is more useful. The problems below are phrased in the
random walk interpretation, because they are more naturally posed in
terms of reaching or passing certain points on the number line.
\subsection*{The interpretation as Markov Processes and Martingales}
The fortune in the coin-tossing game is the first and simplest encounter
with two of the most important ideas in modern probability theory.
We can interpret the fortune in our gambler's coin-tossing game as a
\defn{Markov process}.%
\index{Markov process}
That is, at successive times the process is in various states. In our
case, the states are the values of the fortune. The probability of
passing from one state at the current time \( t \) to another state at
time \( t+1 \) is completely determined by the present state. That is,
for our coin-tossing game
\begin{align*}
\Prob{ T_{t+1} = x+1 | T_t = x} &= p \\
\Prob{ T_{t+1} = x-1 | T_t = x} &= q \\
\Prob{ T_{t+1} = y | T_t = x} &= 0 \text{ for all \( y
\ne x+1, x-1 \)}
\end{align*}
The most important property of a Markov process is that the probability
of being in the next state is completely determined by the current state
and not the history of how the process arrived at the current state. In
that sense, we often say that a Markov process is memory-less.
We can also note the fair coin-tossing game with \( p = 1/2 = q \) is a
\defn{martingale}.%
\index{martingale}
That is, the expected value of the process at the next step is the
current value. Using expectation for estimation, the best estimate we
have of the gambler's fortune at the next step is the current fortune:
\[
\E{T_{n+1} | T_n = x} = (x+1)(1/2) + (x-1)(1/2) = x.
\]
This characterizes a fair game: after the next step, one can neither
expect to be richer nor poorer. Note that the coin-tossing games with \(
p \ne q \) do not have this property.
In later sections we have more occasions to study the properties of
martingales, and to a lesser degree Markov processes.
\subsection*{Sources}
This section is adapted from W. Feller, in \emph{Introduction to
Probability Theory and Applications, Volume I}, Chapter XIV, page 342,
\cite{feller73}. Some material is adapted from
\cite{steele01} and
\cite{karlin81-secon-cours-stoch-proces}. Steele has an excellent
discussion at about the same level as I have done it here, but with a
slightly more rigorous approach to solving the difference equations. He
also gives more information about the fact that the duration is almost
surely finite, showing that all moments of the duration are finite.
Karlin and Taylor give a treatment of the ruin problem by direct
application of Markov chain analysis, which is not essentially
different, but points to greater generality.
\hr
\visual{Algorithms, Scripts, Simulations}{../../../../CommonInformation/Lessons/computer.png}
\section*{Algorithms, Scripts, Simulations}
\subsection*{Algorithm}
The goal is to simulate the probability function for
ruin with a given starting value. First set the probability \( p \), number of
Bernoulli trials \( n \), and number of experiments \( k \). Set the ruin and victory
values \( r \) and \( v \), the boundaries for the random walk. For each starting value
from ruin to victory, fill an \( n \times k \) matrix with the
Bernoulli random variables. For languages with multi-dimensional
arrays, the data is kept in a three-dimensional array of size \( n \times
k \times (v-r+1) \). Cumulatively sum the Bernoulli random
variables to create the fortune or random walk. For each starting
value, for each random walk or fortune path, find the step where ruin
or victory is encountered. For each starting value, find the
proportion of fortunes encountering ruin. Finally, find a least squares
linear fit of the ruin probabilities as a function of the starting
value.
\subsection*{Scripts}
\begin{description}
\item[Geogebra]
\item[R]
\begin{lstlisting}[language=R]
# Monte Carlo estimate of ruin probabilities for a simple random walk:
# for each starting value in ruin:victory, run k walks of n steps of
# +1 (probability p) or -1, and record the fraction of walks that hit
# the ruin boundary before the victory boundary.  Finish with a least
# squares linear fit of ruin probability versus starting value.
p <- 0.5
n <- 150
k <- 60
victory <- 10
# top boundary for random walk
ruin <- -10
# bottom boundary for random walk
interval <- victory - ruin + 1
winLose <- 2 * (array( 0+(runif(n*k*interval) <= p), dim=c(n,k,
interval))) - 1
# 0+ coerces Boolean to numeric
totals <- apply( winLose, 2:3, cumsum)
# the second argument ``2:3'' means column-wise in each panel
start <- outer( array(1, dim=c(n+1,k)), ruin:victory, "*")
# paths has one extra initial row holding the starting position
paths <- array( 0 , dim=c(n+1, k, interval) )
paths[2:(n+1), 1:k, 1:interval] <- totals
paths <- paths + start
# first index where each path equals the boundary value; steps are
# +/-1 so a path cannot jump over a boundary without landing on it
hitVictory <- apply(paths, 2:3, (function(x)match(victory,x,
nomatch=n+2)));
hitRuin <- apply(paths, 2:3, (function(x)match(ruin, x, nomatch=n+2)));
# the second argument ``2:3'' means column-wise in each panel
# If no ruin or victory on a walk, nomatch=n+2 sets the hitting
# time to be two more than the number of steps, one more than
# the column length. Without the nomatch option, get NA which
# works poorly with the comparison hitRuin < hitVictory next.
probRuinBeforeVictory <-
apply( (hitRuin < hitVictory), 2,
(function(x)length((which(x,arr.ind=FALSE)))) )/k
# empirical ruin probability, one entry per starting value
startValues <- (ruin:victory);
ruinFunction <- lm(probRuinBeforeVictory ~ startValues)
# lm is the R function for linear models, a more general view of
# least squares linear fitting for response ~ terms
plot(startValues, probRuinBeforeVictory);
abline(ruinFunction)
\end{lstlisting}
\item[Octave]
\begin{lstlisting}[language=Octave]
# Monte Carlo estimate of ruin probabilities for a simple random walk:
# for each starting value in ruin:victory, run k walks of n +/-1 steps
# (step +1 with probability p) and record the fraction of walks that
# hit the ruin boundary before the victory boundary.
p = 0.5;
n = 150;
k = 60;
victory = 10;
# top boundary for random walk
ruin = -10;
# bottom boundary for random walk
probRuinBeforeVictory = zeros(1, victory-ruin+1);
for start = ruin:victory
winLose = 2 * (rand(n,k) <= p) - 1;
# -1 for Tails, 1 for Heads
totals = cumsum(winLose);
# -n..n (every other integer) binomial rv sample
paths = [zeros(1,k); totals] + start;
# victoryOrRuin(j) encodes the first hitting time of walk j:
# positive for victory, negative for ruin, 0 if neither in n steps
victoryOrRuin = zeros(1,k);
for j = 1:k
hitVictory = find(paths(:,j) >= victory);
hitRuin = find(paths(:,j) <= ruin);
if ( !rows(hitVictory) && !rows(hitRuin) )
# no victory, no ruin
# do nothing
elseif ( rows(hitVictory) && !rows(hitRuin) )
# victory, no ruin
victoryOrRuin(j) = hitVictory(1);
elseif ( !rows(hitVictory) && rows(hitRuin) )
# no victory, but hit ruin
victoryOrRuin(j) = -hitRuin(1);
else # ( rows(hitvictory) && rows(hitruin) )
# victory and ruin
if ( hitVictory(1) < hitRuin(1) )
victoryOrRuin(j) = hitVictory(1);
# code hitting victory
else
victoryOrRuin(j) = -hitRuin(1);
# code hitting ruin as negative
endif
endif
endfor
# start + (-ruin+1) shifts start values onto indices 1..victory-ruin+1
probRuinBeforeVictory(start + (-ruin+1)) = sum( victoryOrRuin < 0 )/k;
# probRuinBeforeVictory
endfor
# least_square: fit y = coeff(1)*x + coeff(2) by ordinary least
# squares, plot the data points and the fitted line, and return the
# coefficient vector.  Arguments x and y are column vectors of equal
# length.
function coeff = least_square (x,y)
n = length(x);
A = [x ones(n,1)];
# backslash computes the least-squares solution of A*coeff = y
coeff = A\y;
plot(x,y,'x');
hold on
interv = [min(x) max(x)];
plot(interv,coeff(1)*interv+coeff(2));
end
least_square(transpose( ruin : victory ), transpose(probRuinBeforeVictory))
\end{lstlisting}
\item[Perl]
\begin{lstlisting}[language=Perl]
use PDL;
use PDL::NiceSlice;
$p = 0.5;
$n = 15;
$k = 10;
$victory = 4;
$ruin = -4;
$interval = $victory - $ruin + 1;
$winLose = 2 * ( random($k,$n, $interval) >= $p ) - 1;
$totals = (cumusumover $winLose->xchg(0,1))->transpose;
$start = zeroes($k,$n+1, $interval)->zlinvals($ruin,$victory);
$paths = zeroes($k,$n+1,$interval);
# use PDL::NiceSlice on next line
$paths( 0:($k-1), 1:$n, 0:($interval-1) ) .= $totals;
# Note the use of the concat operator here.
$paths = $paths + $start;
$hitVictory = $paths->setbadif($paths < $victory);
$hitRuin = $paths->setbadif($paths > $ruin);
$victoryIndex = $hitVictory(,,:)->xchg(0,1)->maximum_ind;
$ruinIndex = $hitRuin(,,:)->xchg(0,1)->maximum_ind;
$test = whichND($hitVictory);
p $test->dims;
p $test(,0:20);
\end{lstlisting}
\item[SciPy]
\end{description}
\hr
\visual{Problems to Work}{../../../../CommonInformation/Lessons/solveproblems.png}
\section*{Problems to Work for Understanding}
\begin{enumerate}
\item
Show the sequence \( [(q/p)^n - 1]/n \) is an increasing
sequence for \( 0 < p < 1/2 < q < 1. \)
\item
In a random walk starting at the origin find the probability
that the point \( a > 0 \) will be reached before the point \(
-b < 0 \).
%% \HCode{} Solution \HCode{}
\item
James Bond wants to ruin the casino at Monte Carlo by
consistently betting 1 Euro on Red at the roulette wheel. The
probability of Bond winning at one turn in this game is $ 18/38
\approx 0.474 $. James Bond, being Agent 007, is backed by the
full financial might of the British Empire, and so can be
considered to have unlimited funds. Approximately how much
money should the casino have to start with so that Bond has only
a ``one-in-a-million'' chance of ruining the casino?
\item
A gambler starts with \$2 and wants to win \$2 more to get to a
total of \$4 before being ruined by losing all his money. He
plays a coin-flipping game, with a coin that changes with his
fortune.
\begin{enumerate}
\item
If the gambler has \$2 he plays with a coin that gives
probability $ p=1/2 $ of winning a dollar and
probability $ q=1/2 $ of losing a dollar.
\item
If the gambler has \$3 he plays with a coin that gives
probability $ p=1/4 $ of winning a dollar and
probability $ q=3/4 $ of losing a dollar.
\item
If the gambler has \$1 he plays with a coin that gives
probability $ p=3/4 $ of winning a dollar and
probability $ q=1/4 $ of losing a dollar.
\end{enumerate}
Use ``first step analysis'' to write three equations in three
unknowns (with two additional boundary conditions) that give the
probability that the gambler will be ruined. Solve the equations
to find the ruin probability.
% \emph{Solution:} The equations are:
% \begin{eqnarray*}
% q_4 &=& 0 \\
% q_3 &=& \frac{1}{4} q_4 + \frac{3}{4} q_2 \\
% q_2 &=& \frac{1}{2} q_3 + \frac{1}{2} q_1 \\
% q_1 &=& \frac{3}{4} q_2 + \frac{1}{4} q_0 \\
% q_0 &=& 1
% \end{eqnarray*}
% The solution is $q_1 = 5/8$, $q_2 = 1/2$ and $q_3 = 3/8$.
\item
A gambler plays a coin flipping game in which the probability of
winning on a flip is \( p = 0.4 \) and the probability of losing
on a flip is \( q = 1-p = 0.6 \). The gambler wants to reach
the victory level of \$16 before being ruined with a fortune of
\$0. The gambler starts with \$8, bets \$2 on each flip when
the fortune is \$6, \$8, \$10 and bets \$4 when the fortune is \$4
or \$12. Compute the probability of ruin in this game.
% Using our standard notation of \( q_n \) for the probability of
% ruin from a fortune of \( n \) the first step analysis yields
% the equations working down from the
% \begin{align*}
% q_{0} &= 1 \\
% q_{4} &= 0.6 \cdot q_{0} + 0.4 \cdot q_{6} \\
% q_{6} &= 0.6 \cdot q_{4} + 0.4 \cdot q_{8} \\
% q_{8} &= 0.6 \cdot q_{6} + 0.4 \cdot q_{8} \\
% q_{10} &= 0.6 \cdot q_{8} + 0.4 \cdot q_{12} \\
% q_{12} &= 0.6 \cdot q_{10} + 0.4 \cdot q_{16} \\
% q_{16} &= 0 \\
% \end{align*}
% The solution is \( q_0 = 1., q_{10} = .6090225564, q_{12} =
% .3654135338, q_{16} = 0., q_{4} = .9518796992, q_{6} =
% .8796992481, q_{8} = .7714285714 \).
% Note that the probability of ruin for the timid strategy of
% betting \$1 at each fortune is \( 0.9624468240 \), so this
% version of bold play actually reduces the probability of ruin!
% In fact, the boldest play is to bet \$8 on each bet, which
% reduces the probability of ruin to \( 0.6 \).
\item
Prove: In a random walk starting at the origin the probability
to reach the point \( a > 0 \) before returning to the origin
equals \( p(1-q_1) \).
%% \HCode{} Solution \HCode{}
\item
Prove: In a random walk starting at \( a > 0 \) the probability
to reach the origin before returning to the starting point
equals \( q q_{a-1} \).
%% \HCode{} Solution \HCode{}
\item
In the simple case \( p = 1/2 = q \), conclude from the
preceding problem: In a random walk starting at the origin, the
number of visits to the point \( a > 0 \) that take place before
the first return to the origin has a geometric distribution with
ratio \( 1 - q q_{a-1} \). (Why is the condition \( q \ge p \)
necessary?)
%% \HCode{} Solution \HCode{}
\item
\begin{enumerate}
\item
Draw a sample path of a random walk (with \( p = 1/2 = q
\)) starting from the origin where the walk visits the
position \( 5 \) twice before returning to the origin.
\item
Using the results from the previous problems, it can be
shown with careful but elementary reasoning that the
number of times \( N \) that a random walk (\( p = 1/2 =
q \)) reaches the value \( a \) a total of \( n \) times
before returning to the origin is a geometric random
variable with probability
\[
\Prob{N=n} = \left( \frac{1}{2a} \right)^n \left( 1
- \frac{1}{2a} \right).
\]
Compute the expected number of visits \( \E{N} \) to
level \( a \).
\item
Compare the expected number of visits of a random walk
(with \(
p= 1/2 = q \)) to the value \( 1{,}000{,}000 \) before
returning to the origin and to the level \( 10 \) before
returning to the origin.
\end{enumerate}
%% \HCode{} Solution \HCode{}
\item
This problem is adapted from \emph{Stochastic Calculus and
Financial Applications} by J. Michael Steele, Springer, New
York, 2001, Chapter 1, Section 1.6, page 9. Information on
buy-backs is adapted from investorwords.com. This problem
suggests how results on biased random walks can be worked into
more realistic models.
Consider a naive model for a stock that has a support level of
\$20/share because of a corporate buy-back program. (This means
the company will buy back stock if shares dip below \$20 per
share. In the case of stocks, this reduces the number of shares
outstanding, giving each remaining shareholder a larger
percentage ownership of the company. This is usually considered
a sign that the company's management is optimistic about the
future and believes that the current share price is undervalued.
Reasons for buy-backs include putting unused cash to use,
raising earnings per share, increasing internal control of the
company, and obtaining stock for employee stock option plans or
pension plans.) Suppose also that the stock price moves randomly
with a downward bias when the price is above \$20, and randomly
with an upward bias when the price is below \$20. To make the
problem concrete, we let $ S_n $ denote the stock price at time $
n $, and we express our stock support hypothesis by the
assumptions that
\begin{eqnarray*}
\Prob{ S_{n+1} = 21 | S_{n} = 20} &= & 9/10 \\
\Prob{ S_{n+1} = 19 | S_{n} = 20} &= & 1/10
\end{eqnarray*}
We then reflect the downward bias at price levels above \$20 by
requiring that for $ k > 20 $:
\begin{eqnarray*}
\Prob{ S_{n+1} = k+1 | S_{n} = k } &= & 1/3 \\
\Prob{ S_{n+1} = k-1 | S_{n} = k } &= & 2/3.
\end{eqnarray*}
We then reflect the upward bias at price levels below \$20 by
requiring that for $ k < 20 $:
\begin{eqnarray*}
\Prob{ S_{n+1} = k+1 | S_{n} = k } &= & 2/3 \\
\Prob{ S_{n+1} = k-1 | S_{n} = k } &= & 1/3
\end{eqnarray*}
Using the methods of ``single-step analysis'' calculate the
expected time for the stock to fall from \$25 through the
support level all the way down to \$18. (I don't believe that
there is any way to solve this problem using formulas. Instead
you will have to go back to basic principles of single-step or
first-step analysis to solve the problem.)
\end{enumerate}
\hr
\visual{Books}{../../../../CommonInformation/Lessons/books.png}
\section*{Reading Suggestion:}
\bibliography{../../../../CommonInformation/bibliography}
% \begin{enumerate}
% \item
% Section XIV.2, \emph{Introduction to Probability Theory and Its
% Applications} by W. Feller, J. Wiley and Sons, pages 345 -348.
% \item
% Section 3.3 \emph{A First Course in Stochastic Processes} by S.
% Karlin and H. Taylor, Academic Press, 1975, pages 92-93. This
% gives a treatment of the ruin problem by direct application of
% Markov chain analysis, which is not essentially different, but
% points to greater generality.
% \item
% Chapter 1 of \emph{Stochastic Calculus and Financial
% Applications} by J. Michael Steele. Springer, New York, 2001.
% (QA 274.2 S 74) Steele has an excellent discussion at about the
% same level as I have done it here, but with a slightly more
% rigorous approach to solving the difference equations. He also
% gives more information about the fact that the duration is
% almost surely finite, showing that all moments of the duration
% are finite.
% \end{enumerate}
\hr
\visual{Links}{../../../../CommonInformation/Lessons/chainlink.png}
\section*{Outside Readings and Links:}
\begin{enumerate}
\item
\link{http://www.math.uah.edu/stat/games/index.xhtml}{Virtual
Labs in Probability} Section 13, Games of Chance. Scroll down
and select the Red and Black Experiment (marked in red in the
Applets Section. Read the description since the scenario is
slightly different but equivalent to the description above.)
\item
\link{http://math.ucsd.edu/~anistat/gamblers_ruin.html}{University
of California, San Diego, Department of Mathematics, A.M.
Garsia} A java applet that simulates how long it takes for a
gambler to go broke. You can control how much money you and the
casino start with, the house odds, and the maximum number of
games. Results are a graph and a summary table. Submitted by
Matt Odell, September 8, 2003.
\item
\link{http://mathworld.wolfram.com/GamblersRuin.html}{Eric
Weisstein, World of Mathematics} A good description of gambler's
ruin, martingale and many other coin tossing and dice problems
and various probability problems. Submitted by Yogesh Makkar,
September 16th 2003.
\end{enumerate}
\hr
\mydisclaim \myfooter
Last modified: \flastmod
\end{document}