from scipy.stats import norm
import scipy.integrate as integrate
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# By Sheng BI
Consider an unemployed worker, infinitely lived. At each time $t$, she receives a job offer $w_{t}$.
The job offers are drawn $i.i.d.$ across $t$ from a fixed distribution. Let $F$ denote the cumulative distribution function of wage offers, so that $F(w)$ is the probability that an offer is at most $w$.
Upon receiving $w_{t}$, she faces a decision: accept the offer and work at wage $w_{t}$ forever, or reject it, receive unemployment compensation $b$ this period, and wait for the next draw.
Thus, at any point in time, she is in one of two states: employed or unemployed.
An unemployed worker maximizes her expected lifetime utility: the expected discounted sum of consumption $c_{t}$. In this simplified model, the worker cannot save to transfer assets intertemporally, so her consumption equals her income, $c_{t} = y_{t}$.
$$ \begin{eqnarray*} E\left[\sum_{t=0}^{\infty} \beta^{t} y_{t}\right] \tag{1} \end{eqnarray*} $$
where
$$y_{t}=\begin{cases} w & \text{if accept}\\ b & \text{if reject} \end{cases} $$
Parameters:
## Sampling wages from a normal distribution
w_min, w_max = 0, 200
w_vals = np.linspace(w_min, w_max, (w_max - w_min))  # grid spacing of roughly 1
w_mean, w_std = 100, 20
# With roughly unit spacing, the sampled density values approximately sum to
# one, so they double as probability weights in the discrete sums below.
wpdf_vals = norm.pdf(w_vals, loc=w_mean, scale=w_std)
my_benefit = 30
my_beta = 0.99
class MccallModel():
    def __init__(self, b=my_benefit, beta=my_beta,
                 wage_vals=w_vals, wpdf_vals=wpdf_vals):
        '''
        Publicly accessible and modifiable class attributes.
        '''
        self.compensation = b
        self.beta = beta
        self.wVals = wage_vals
        self.wpdfVals = wpdf_vals
fig, ax = plt.subplots(figsize=(9, 7.5))
ax.stem(w_vals, wpdf_vals)
# ax.set_xlim(w_min, w_max)
ax.set_xlim(50, 150)
ax.set_xlabel('Wage draw')
ax.set_ylabel('Probability density')
plt.show()
# ref: https://stackoverflow.com/questions/7406943/python-immutable-private-class-variables
We are interested in the case where the worker makes optimal decisions.
By convention, let $\sigma$ denote the worker's decision on whether or not to accept the wage offer $w$. An optimal policy is a function from the state (the current offer) to a decision. The value attached to each decision is
$$\begin{cases} \frac{w}{1-\beta} & \text{if accept}\\ b + \beta \int_{0}^{B}V_{\sigma}(w')dF(w') & \text{if reject} \end{cases} $$
Here, $V_{\sigma}(w)$ denotes the expected lifetime utility of a worker who enters the current period unemployed and receives a wage offer $w$. More precisely, $V_{\sigma}(w)$ is the value attained when the worker makes optimal decisions in the current period and at all future dates.
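The accept value is just a geometric series: a worker who accepts an offer $w$ earns it in every subsequent period, so
$$ \begin{eqnarray*} \sum_{t=0}^{\infty}\beta^{t} w = \frac{w}{1-\beta} \end{eqnarray*} $$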
Bellman's equation for the unemployed:
\begin{eqnarray*} V_{\sigma}(w) = \max\left(\frac{w}{1-\beta},\; b + \beta \int_{0}^{B}V_{\sigma}(w')dF(w')\right) \end{eqnarray*}
where $B$ is the upper bound for $w_{t}$.
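On the discrete wage grid built above, one application of this Bellman operator can be written down directly. Below is a minimal sketch, assuming `w_vals` and `wpdf_vals` from the earlier cell serve as the grid and its approximate probability weights.
def bellman_operator(v, b=my_benefit, beta=my_beta):
    # Rejection value: the benefit today plus the discounted expected value
    # of next period's draw, with the integral replaced by a weighted sum.
    continuation = b + beta * np.sum(v * wpdf_vals)
    # Elementwise max: accept each grid wage forever, or reject and wait.
    return np.maximum(w_vals / (1 - beta), continuation)
Iterating this map is exactly what the value function iteration code below does.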
The worker's tradeoff: accept a job offer early, or wait for a better offer to arrive in the future.
Existence of an optimal policy follows from the Contraction Mapping Theorem.
The optimal policy turns out to be a reservation-wage rule: accept whenever $w \geq \bar{w}$.
The reservation wage is given by
$$ \begin{eqnarray*} \frac{\bar{w}}{1-\beta} = \bar{V}_\sigma = b + \beta \int_{0}^{B}V_\sigma(w')dF(w') \end{eqnarray*} $$
so that $\bar{V}_\sigma$ is a function of $\bar{w}$, which is in turn determined by the other parameters of the model.
For all $w < \bar{w}$,
$$ \begin{eqnarray*} V_\sigma(w) & = & b + \beta \int_{0}^{B}V_\sigma(w')dF(w') \\ & = & \frac{\bar{w}}{1-\beta} \end{eqnarray*} $$
For all $w > \bar{w}$,
$$ \begin{eqnarray*} V_\sigma(w) = \frac{w}{1-\beta} \end{eqnarray*} $$
Substituting this piecewise form of $V_\sigma$ into the reservation-wage equation, we obtain:
\begin{eqnarray*} \frac{\bar{w}}{1-\beta} = b + \beta \int_{0}^{\bar{w}}\frac{\bar{w}}{1-\beta}dF(w') + \beta \int_{\bar{w}}^{B}\frac{w'}{1-\beta}dF(w') \tag{2} \end{eqnarray*}Since
\begin{eqnarray*} 1 = \int_{0}^{B}dF(w') = \int_{0}^{\bar{w}}dF(w') + \int_{\bar{w}}^{B}dF(w') \end{eqnarray*}we can rearrange $(2)$ to obtain
\begin{eqnarray*} \bar{w}\int_{0}^{\bar{w}}dF(w') - b = \frac{1}{1-\beta} \int_{\bar{w}}^{B}(\beta w' - \bar{w})dF(w') \tag{3} \end{eqnarray*}Adding both sides of the following equation to $(3)$
\begin{eqnarray*} \bar{w}\int_{\bar{w}}^{B}dF(w') = \frac{(1-\beta)\bar{w}}{1-\beta} \int_{\bar{w}}^{B}dF(w') \end{eqnarray*}and rearranging, we obtain:
\begin{eqnarray*} \bar{w} - b = \frac{\beta}{1-\beta} \int_{\bar{w}}^{B}(w' - \bar{w})dF(w') \tag{4} \end{eqnarray*}The following code confirms that a solution for $\bar{w}$ exists. Moreover, since the right-hand side of $(4)$ is positive, the reservation wage exceeds the unemployment benefit $b$: the anticipated possibility of future employment itself carries value.
## https://matplotlib.org/users/pyplot_tutorial.html
def lhs(rvw, slope=1, intercept=-my_benefit):
    # Left-hand side of equation (4): w - b.
    return slope * rvw + intercept
def integrand_continuous(x, rvw):
    # Integrand of the right-hand side of (4): (w' - w_bar) dF(w').
    return (x - rvw) * norm.pdf(x, loc=w_mean, scale=w_std)
def integrand_discrete(rvw, x, std=w_std):
    # Discrete counterpart: zero out wages below the candidate reservation wage.
    return np.where(x >= rvw, (x - rvw) * norm.pdf(x, loc=w_mean, scale=std), 0)
def rhs(rvw, beta=my_beta, B=2000, continuous=True, std=w_std):
    '''
    Right-hand side of equation (4), evaluated at each candidate
    reservation wage in rvw. scipy.integrate.quad is not vectorized,
    so we loop over the candidates.
    '''
    if continuous:
        result = [integrate.quad(integrand_continuous, w, B, args=(w,))[0]
                  for w in rvw]
    else:
        result = [np.sum(integrand_discrete(w, x=w_vals, std=std))
                  for w in rvw]
    return (beta / (1 - beta)) * np.array(result)
steps = np.arange(0.0, 170.0, 1)
fig, ax = plt.subplots()
ax.plot(steps, lhs(steps), 'b-')
ax.plot(steps, rhs(steps, continuous=True), 'b-')
ax.plot(steps, rhs(steps, continuous=False), 'r-')  # discrete approximation
ax.set_ylim(-100, 600)
ax.set_xlim(0, 140)
ax.text(80, 90, r'$\bar{w} - b$', fontsize=15)
ax.text(40, 500,
        r"$\frac{\beta}{1-\beta} \int_{\bar{w}}^{B}(w' - \bar{w})dF(w')$",
        fontsize=15)
ax.axvline(x=125.4)  # the intersection pins down the reservation wage
ax.set_xlabel("wage")
ax.set_ylabel("left hand side | right hand side")
plt.show()
The reservation wage solved by the above method is approximately 125.5.
We also notice that the discrete sum is a good approximation to the integral, and it is far faster to compute; the quadrature-based version proceeds slowly.
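As a cross-check, we can also solve equation $(4)$ directly with a root-finder instead of reading the intersection off the plot. Below is a minimal sketch using `scipy.optimize.brentq`, which is not used elsewhere in this notebook.
from scipy.optimize import brentq
def gap(w):
    # Difference between the two sides of equation (4); rhs expects an
    # iterable of candidates, so wrap the scalar and unwrap the result.
    return lhs(w) - rhs([w], continuous=True)[0]
# gap is negative at w_min and positive at w_max, so a root is bracketed.
print(brentq(gap, w_min, w_max))  # roughly 125.5 with these parameters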
Now, how does the solution vary with the standard deviation of the wage distribution?
steps = np.arange(0.0, 170.0, 1)
fig, ax = plt.subplots(figsize=(9, 6.5))
ax.plot(steps, lhs(steps), 'b-')
for each in np.linspace(10, 30, 5):
    ax.plot(steps, rhs(steps, continuous=False, std=each),  # discrete approximation
            label="standard deviation: {}".format(each))
ax.set_ylim(-100, 600)
ax.set_xlim(0, 170)
ax.legend(loc='upper left')
ax.set_xlabel("wage")
plt.show()
Comparative statics: a larger standard deviation raises the right-hand side of $(4)$ at every candidate wage (the expected gain from waiting is a convex function of $w'$), shifting the intersection rightward. More dispersed offers therefore raise the reservation wage.
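The same root-finding idea gives comparative statics in the other parameters. Below is a sketch varying the unemployment benefit $b$, reusing brentq and rhs from above; the benefit levels are purely illustrative.
# Higher benefits make waiting cheaper, so the reservation wage should rise.
for b_val in [10, 30, 50]:
    w_bar_b = brentq(lambda w, b=b_val: (w - b) - rhs([w], continuous=True)[0],
                     w_min, w_max)
    print("b = {:2d} -> reservation wage = {:.1f}".format(b_val, w_bar_b))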
In the following, we solve the model by value function iteration, as in the original lecture. As $t$ increases, the contraction mapping theorem guarantees that the iterates converge to the fixed point, at which the value function $V$ is optimal.
\begin{eqnarray*} V_{\sigma, t+1}(w) = \max\left(\frac{w}{1-\beta},\; b + \beta \int_{0}^{B}V_{\sigma, t}(w')dF(w')\right) \end{eqnarray*}
class ValueFunction(MccallModel):
"""
Inheritance
"""
def __init__(self, b = my_benefit,\
beta = my_beta,\
wage_vals = w_vals,\
wpdf_vals = wpdf_vals):
MccallModel.__init__(self)
# initialization of v
self.current = self.wVals / (1 - self.beta)
self.next = np.empty_like(self.current)
return
valueFunc = ValueFunction()
fig, ax = plt.subplots(figsize=(9, 8.5))
for i in range(10):
    ax.plot(w_vals, valueFunc.current,
            label="iterate {}".format(i))
    # One application of the Bellman operator, vectorized over the grid.
    valueFunc.next = np.maximum(w_vals / (1 - my_beta),
                                my_benefit + my_beta *
                                np.sum(valueFunc.current * valueFunc.wpdfVals))
    valueFunc.current[:] = valueFunc.next
    print(valueFunc.next[30])
ax.set_ylim(9000, 12500)
ax.set_xlim(0, 200)
ax.set_xlabel("Wage")
ax.set_ylabel("Value of value function")
ax.legend(loc='center right')
plt.show()
def solve_model(b=my_benefit, beta=my_beta,
                wage_vals=w_vals,
                wpdf_vals=wpdf_vals,
                num_iter=1000, tol=1e-8):
    valueFunc = ValueFunction(b, beta, wage_vals, wpdf_vals)
    i, bias = 0, 100
    while (i < num_iter and bias > tol):
        # Bellman update, using the arguments rather than the globals.
        valueFunc.next = np.maximum(wage_vals / (1 - beta),
                                    b + beta * np.sum(valueFunc.current * wpdf_vals))
        ref = np.copy(valueFunc.current)
        valueFunc.current[:] = valueFunc.next
        bias = np.max(np.abs(valueFunc.current - ref))
        i += 1
    # At the reservation wage, w_bar/(1-beta) equals the rejection value.
    reservation_wage = (1 - beta) * \
        (b + beta * np.sum(valueFunc.current * valueFunc.wpdfVals))
    return valueFunc, reservation_wage
print(solve_model()[1])
The gap between the reservation wage obtained by iteration and the one obtained from equation $(4)$ arises because the normal distribution is continuous, while the value function was computed with a discrete approximation of the probability density.
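A sketch of one way to reduce this gap: renormalize the sampled densities into a proper probability mass function before iterating, which removes one source of the discrepancy (the weights not summing exactly to one).
wpmf_vals = wpdf_vals / wpdf_vals.sum()  # weights now sum exactly to one
print(solve_model(wpdf_vals=wpmf_vals)[1])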