The $1-\alpha$ family confidence limits for $\beta_0$ and $\beta_1$ under the normal error regression model, obtained by the Bonferroni procedure, are:
$b_0 \pm Bs\{b_0\}$ and $b_1 \pm Bs\{b_1\}$
where $B= t(1-\alpha/4;n-2)$
# Read the data: lot size (X) vs. work hours (Y) — presumably the Toluca
# Company example from Table 1.1 of the book (project-local reader).
from read_data_fx import read_tb_dataTA
data=read_tb_dataTA(1,1) # this data is given in table 1.1 in the book
X=data[:,0] # Lot size (predictor)
Y=data[:,1] # Work hours (response)
import numpy as np
import matplotlib.pylab as plt  # NOTE(review): plt is unused in this chunk; may be used later in the file
from scipy import stats
def lin_reg(X,Y):
    """Fit a simple linear regression of Y on X by ordinary least squares.

    Returns a dict with the fitted values ('Yhat'), intercept ('b0'),
    slope ('b1'), residuals ('e_i'), error sum of squares ('sse') and
    regression sum of squares ('ssr').
    """
    x_mean = np.mean(X)
    y_mean = np.mean(Y)
    x_dev = X - x_mean
    # slope = Sxy / Sxx; intercept makes the line pass through the centroid
    slope = np.sum(x_dev * (Y - y_mean)) / np.sum(x_dev ** 2)
    intercept = y_mean - slope * x_mean
    fitted = intercept + slope * X
    residuals = Y - fitted
    return {'Yhat': fitted,
            'b0': intercept,
            'b1': slope,
            'e_i': residuals,
            'sse': np.sum(residuals ** 2),
            'ssr': np.sum((fitted - y_mean) ** 2)}
# Joint (family) 90% Bonferroni confidence intervals for beta_0 and beta_1.
n = len(X)
linreg = lin_reg(X, Y)
Yhat = linreg['Yhat']
sse = linreg['sse']
# np.float was removed in NumPy 1.20+; the builtin float() is the portable form.
MSE = sse / float(n - 2)
barX = np.mean(X)
XminusbarX = X - barX
sb1 = np.sqrt(MSE / sum(XminusbarX ** 2))                           # s{b1}
sb0 = np.sqrt(MSE * (1.0 / n + barX ** 2 / sum(XminusbarX ** 2)))   # s{b0}
# Family coefficient 0.90 with g = 2 statements -> B = t(1 - alpha/4; n-2).
B = stats.t.ppf(1 - .10 / 4, n - 2)
b0 = linreg['b0']; b1 = linreg['b1']
ub0 = format(b0 + B * sb0, '.2f'); lb0 = format(b0 - B * sb0, '.2f')
ub1 = format(b1 + B * sb1, '.2f'); lb1 = format(b1 - B * sb1, '.2f')
from IPython.display import display, Math, Latex
display(Math(str(lb0) + r' \leq \beta_0 \leq' + str(ub0)))
display(Math(str(lb1) + r' \leq \beta_1 \leq' + str(ub1)))
The simultaneous confidence limits for $g$ mean responses $E\{Y_h\}$ under the normal error regression model, by the Working–Hotelling procedure, are:
$\hat{Y}_h \pm Ws\{\hat{Y}_h\}$
Where $W^2=2F(1-\alpha;2,n-2)$
Alternatively, by the Bonferroni procedure:
$\hat{Y}_h \pm Bs\{\hat{Y}_h\}$
where $B = t(1-\alpha/(2g);\,n-2)$
Calculate the above estimates for $X_h=30,65,100$, using a family confidence coefficient of $.90$.
# Working-Hotelling procedure: simultaneous 90% confidence band for the mean
# response at Xh = 30, 65, 100, using W^2 = 2 F(1-alpha; 2, n-2).
W = np.sqrt(2.0 * stats.f.ppf(0.9, 2, n - 2))
Xh_list = [30, 65, 100]
up = np.zeros(len(Xh_list)); lo = np.zeros(len(Xh_list))
Y_h_hat = np.zeros(len(Xh_list)); s_of_yh_hat = np.zeros(len(Xh_list))
i = 0
for Xh in Xh_list:
    Y_h_hat[i] = b0 + b1 * Xh
    # s{Yhat_h}^2 = MSE * (1/n + (Xh - Xbar)^2 / Sxx)
    s_of_yh_hat[i] = np.sqrt(MSE * (1.0 / n + (Xh - barX) ** 2 / sum(XminusbarX ** 2)))
    up[i] = Y_h_hat[i] + W * s_of_yh_hat[i]
    lo[i] = Y_h_hat[i] - W * s_of_yh_hat[i]
    i = i + 1
import pandas as pd
pd.options.display.float_format = '{:,.1f}'.format
# Raw strings: '\h' / '\{' are invalid escapes in plain literals (the byte
# values are unchanged, only the warning is avoided).
df = pd.DataFrame({r'$X_h$': Xh_list[:],
                   r'$\hat{Y}_h$': Y_h_hat[:],
                   r'$s\{\hat{Y}_h\}$': s_of_yh_hat[:]})
# DataFrame.reindex_axis was removed in pandas 0.25; reindex(..., axis=1)
# is the supported replacement for column reordering.
df.reindex([r'$X_h$', r'$\hat{Y}_h$', r'$s\{\hat{Y}_h\}$'], axis=1)
df.set_index(r'$X_h$')
# float16 is used only to shorten the numbers for display
lo = np.float16(lo); up = np.float16(up)
display(Math(str(lo[0]) + r' \leq E\{Y_h\} \leq' + str(up[0])))
display(Math(str(lo[1]) + r' \leq E\{Y_h\} \leq' + str(up[1])))
display(Math(str(lo[2]) + r' \leq E\{Y_h\} \leq' + str(up[2])))
# Bonferroni procedure: joint 90% intervals for E{Y_h} at g = 3 levels of X,
# with B = t(1 - alpha/(2g); n-2).
B = stats.t.ppf(1 - .10 / (2 * 3.0), n - 2)  # g=3 here
Xh_list = [30, 65, 100]
up = np.zeros(len(Xh_list)); lo = np.zeros(len(Xh_list))
Y_h_hat = np.zeros(len(Xh_list)); s_of_yh_hat = np.zeros(len(Xh_list))
for idx, xh in enumerate(Xh_list):
    Y_h_hat[idx] = b0 + b1 * xh
    s_of_yh_hat[idx] = np.sqrt(MSE * (1.0 / n + (xh - barX) ** 2 / sum(XminusbarX ** 2)))
    up[idx] = Y_h_hat[idx] + B * s_of_yh_hat[idx]
    lo[idx] = Y_h_hat[idx] - B * s_of_yh_hat[idx]
# float16 merely shortens the printed numbers
lo, up = np.float16(lo), np.float16(up)
for j in range(len(Xh_list)):
    display(Math(str(lo[j]) + r' \leq E\{Y_h\} \leq' + str(up[j])))