'======================================================
'This program updates the estimates of Lancaster and Tulip (2015), including potential output growth
'Created by: David Lancaster, 25 June 2015
'Revised by Peter Tulip, January 2019, to retrieve updated data from the ABS
'======================================================

'=================
'ESSENTIAL PRELIMINARIES
'=================
'To run this program you need Eviews, a reasonably recent version of R, and the R packages readabs, dplyr and tidyverse installed.
'Save this file in your directory of choice, then overwrite the following line with that directory
%pathname = "G:/RESEARCH/PETERT/R/"
'Because this path is used by R, it needs forward slashes /, not the backslash \ that Eviews and Windows use.
'You may also want to update the estimation end date %ested below to equal the last quarter of available data.
'Then hit Run.

'=================
'GLOBAL SETTINGS
'=================
%sd = "1959q3"       'Workfile start date
%ed = "2020q4"       'Workfile end date
%estsd = "1960q3"    'Estimation start date
%ested = "2019q4"    'Estimation end date

'==================
'CREATE WORKFILE
'==================
'File path (for Eviews, not R)
%directory = @runpath
cd %directory
%wkfile = "Okun_update.wf1"    'Location of workfile

'Create workfile
close %wkfile
workfile %wkfile q %sd %ed

'Strings for temporary file names
%qfilename = "ABS_QDATA.CSV"
%mfilename = "ABS_MDATA.CSV"
%qdata = @stripquotes(%pathname + %qfilename)
%mdata = @stripquotes(%pathname + %mfilename)

'Pull data in from the ABS using the R connection within Eviews. Saves the data as csv files.
'************************************************************************************************************
'Turn on connection to R
XOPEN(type=r)
'Turn on external programming mode
XON
library(readabs)
library(dplyr)
library(tidyverse)

'Import data -- this uses the readabs package by Matt Cowgill.
GDP <- read_abs(series_id = "A2304402X") %>% transmute(date=date, GDP=value) # from 5206.0 Table 1
COE <- read_abs(series_id = "A2302607T") %>% transmute(date=date, COE=value) # from 5206.0 Table 24
PAYROLL <- read_abs(series_id = "A2302778K") %>% transmute(date=date, PAYROLL=value) # from 5206.0 Table 18
# SUBSIDY <- read_abs(series_id = "A85125601x") # special release by ABS
NULC <- read_abs(series_id = "A2433074L") %>% transmute(date=date, NULC=value) # from 5206.0 Table 38
GNE_price <- read_abs(series_id = "A2303727C") %>% transmute(date=date, GNE_price=value) # from 5206.0 Table 5
NFGDP1 <- read_abs(series_id = "A2454488C") %>% transmute(date=date, NFGDP1=value) # from 5206.0 Table 24
NFGDP <- read_abs(series_id = "A2302589X") %>% transmute(date=date, NFGDP=value) # from 5206.0 Table 24
unempq <- read_abs(series_id = "A2454521V") %>% transmute(date=date, unempq=value) # from 1364.0.15.003
lfq <- read_abs(series_id = "A2454517C") %>% transmute(date=date, lfq=value) # from 1364.0.15.003
unempm <- read_abs(series_id = "A84423050A") %>% transmute(date=date, unempm=value) # from 6202.0

ABSqdata <- left_join(COE, GDP, by = "date") %>% left_join(NFGDP, by = "date") %>% left_join(GNE_price, by = "date") %>% left_join(NULC, by = "date") %>% left_join(PAYROLL, by = "date") %>% left_join(unempq, by = "date") %>% left_join(lfq, by = "date")
'Comment: the usual R style of breaking a pipe over several lines does not work in Eviews' external programming mode, so the joins above have to stay on a single line (the line breaks need to be deleted).
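'As an untested sketch, the same joins could also be kept to one line by folding them with purrr::reduce (purrr loads with the tidyverse); uncomment to try:
'ABSqdata <- purrr::reduce(list(COE, GDP, NFGDP, GNE_price, NULC, PAYROLL, unempq, lfq), left_join, by = "date")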
'Write the time series to csv files
zoo::write.zoo(ABSqdata, file = %qdata, sep = ",")
zoo::write.zoo(unempm, file = %mdata, sep = ",")
'Comment: saving the series to csv files and then re-importing them once we are back in Eviews is a clunky way of doing this.
'An alternative approach is the xget command, which transfers vectors to Eviews, but matching these to their differing samples looks even more complicated.

'Turn off external programming mode
XOFF
'Close connection to R
XCLOSE(type=r)
'***********************************************************************************************

'Import the ABS data into Eviews
pagecreate(page=quarterly) q %sd %ed
import %qdata
pagecreate(page=monthly) m %sd %ed
import %mdata

'Convert the monthly unemployment rate to quarterly
smpl @all
copy(c=mean) monthly\unempm quarterly\ur

'Splice to the historical series from the modeller's database
pageselect quarterly
series urate_hist = 100*unempq/lfq
scalar uratio = @elem(ur, "1978q2")/@elem(urate_hist, "1978q2")
smpl @first 1978q1
ur = urate_hist*uratio

'To construct rulc we ignore employment subsidies and assume the payroll tax rate was unchanged until 1972
scalar payroll_rate = @elem(payroll, "1972q3")/@elem(coe, "1972q3")
smpl @first 1972q2
series payroll = payroll_rate*coe
smpl @all
series nulc2 = (coe+payroll)/nfgdp
series rulc = nulc2/gne_price

'==================
'From here, the code is the same as for the 2015 paper
'DATA PREPARATION
'==================
'Change in the unemployment rate
series dur = d(ur)
'Two-quarter GDP growth, annualised log changes
series lgdp = log(gdp)
series d2lgdp = 400*(lgdp-lgdp(-2))/2
'Two-quarter change in real unit labour costs, annualised log changes
series lrulc = log(rulc)
series d2lrulc = 400*(lrulc-lrulc(-2))/2

'=====================
' CONSTANT COEFFICIENTS MODEL
'=====================
smpl %estsd %ested
coef(4) c1
equation okuneq1.ls(cov=white) dur = c1(2)*dur(-1) + c1(3)*(d2lgdp-c1(1)) + c1(4)*d2lrulc(-2)    'Model 1
'show okuneq1

'==========================
' SPECIFY THE KALMAN FILTER
'==========================
'Priors on state variables
vector(2) mprior
mprior(1) = 4     'Prior on starting value for trend GDP growth (annual average GDP growth over the 1950s)
mprior(2) = 0     'Prior on starting value for the coefficient on the lagged dependent variable
sym(2) vprior
vprior(1,1) = 5   'Prior on variance of trend GDP growth (variance of annual GDP growth over the 1950s)
vprior(2,2) = 1   'Prior on variance of the coefficient on the lagged dependent variable

'Specify coefficient vector
coef(8) ckf

'Declare state space
sspace ss1
ss1.append dur = lag*dur(-1) + ckf(2)*(d2lgdp-trend) + ckf(3)*d2lrulc(-2) + [var=exp(ckf(4))]    'Measurement equation
ss1.append @state trend = 1*trend(-1) + [var = exp(ckf(5))]    'State equation for trend GDP growth (random walk)
ss1.append @state lag = 1*lag(-1) + [var = exp(ckf(6))]        'State equation for the coefficient on the lagged dependent variable (random walk)

'Apply priors to state space
ss1.append @mprior mprior
ss1.append @vprior vprior

'Set parameter starting values
param ckf(2) -0.0495 ckf(3) 0.01942 ckf(4) -2.8913 ckf(5) -4.1757 ckf(6) -6.2466    'Starting values for parameters

'=====================
' ESTIMATE THE MODEL
'=====================
'Estimate state space
smpl %estsd %ested        'Estimation sample
ss1.ml(m=500,showopts)    'Estimate the Kalman filter by maximum likelihood
freeze(mytab) ss1.stats

'=====================
' SAVE STATE VARIABLES
'=====================
ss1.makestates(t=filt) *filt        'One-sided filtered estimates of trend GDP growth and the coefficient on the lagged dependent variable
ss1.makestates(t=filtse) *filtse    'Standard errors of the filtered estimates
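'If two-sided (smoothed) estimates are also wanted, they can be saved in the same way -- an untested sketch, assuming your Eviews version supports the smooth options of makestates; uncomment to try:
'ss1.makestates(t=smooth) *smooth
'ss1.makestates(t=smoothse) *smoothse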
'Graph state variables
smpl 1965q1 %ested    'Begin graph sample later due to diffuse prior
graph gr_trend.line trendfilt (trendfilt+trendfiltse) (trendfilt-trendfiltse)    'Trend GDP graph
gr_trend.addtext(t,font(18)) "Potential GDP Growth +/- 1se"
gr_trend.addtext(l,font(14)) "annual % growth"
gr_trend.setelem(1) lcolor(blue) lwidth(2)
gr_trend.setelem(2) lcolor(orange) lwidth(1) linepattern(dash6)
gr_trend.setelem(3) lcolor(orange) lwidth(1) linepattern(dash6)
gr_trend.legend -display
gr_trend.axis(l) grid
gr_trend.options frameaxes(all)
%last = @str(@round(@last(trendfilt),1))
%note = "latest observation = " + %last + "% in " + %ested
gr_trend.addtext(b) %note
'The frame and grid show differently on different printers and screens. To change, try:
'gr_trend.options framecolor(black)
'gr_trend.options gridcolor(black)

graph gr_lag.line lagfilt (lagfilt+lagfiltse) (lagfilt-lagfiltse)    'Coefficient on lagged dependent variable
gr_lag.addtext(t,font(18)) "Coefficient on Lagged Dependent Variable"
gr_lag.setelem(1) lcolor(purple)
gr_lag.setelem(2) lcolor(green)
gr_lag.setelem(3) lcolor(green)

show ss1
show gr_trend
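'Other objects created above can be viewed or saved in the same way -- an optional sketch with placeholder file names; uncomment to use:
'show gr_lag    'Graph of the coefficient on the lagged dependent variable
'show mytab     'Frozen table of state-space estimation statistics
'gr_trend.save(t=png) potential_gdp_growth    'Export the trend growth graph (png export assumes a recent Eviews version)
'wfsave Okun_update_results                   'Save a copy of the workfile with the results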