@incollection{77f60cf37b664059a974e473ff8dee6a,
  title     = {Design Effects on Findings in Simulations Comparing Formative and Reflective Measurement Specifications},
  author    = {Franke, George R. and Chang, Woojung and Lee, Nick},
  booktitle = {Developments in Marketing Science},
  series    = {Developments in Marketing Science: Proceedings of the Academy of Marketing Science},
  publisher = {Springer Nature},
  address   = {Switzerland},
  year      = {2016},
  pages     = {435--436},
  doi       = {10.1007/978-3-319-11815-4_119},
  language  = {English},
  keywords  = {Formative, Misspecification, Reflective, SEM},
  note      = {Publisher Copyright: {\textcopyright} 2016, Academy of Marketing Science.},
  abstract  = {In a highly-cited paper on formative measurement, Jarvis et al. (2003; henceforth JMP) use simulation results to argue that ``misspecification of even one formatively measured construct within a typical structural equation model can have very serious consequences for the theoretical conclusions drawn from that model'' (JMP, p. 212). Aguirre-Urreta and Marakas (2012; henceforth AUM) extend JMP's simulations to focus on differences in the standardized structural coefficients between the formative and reflective specifications. They conclude ``that a lack of attention to the metric of latent variables is responsible for the posited bias [from reflective analyses of formative measures], and when considering the relationships in their standardized form neither the direction nor the magnitude of relationships are biased to the degree previously discussed'' (AUM, p. 124). They further note that ``the consequences [of alternative measurement specifications] might not be as bleak and dire as previously thought'' (AUM, p. 137). Jarvis et al. (2012, p. 144) respond ``that the consequences of measurement model misspecification are exactly as dire as previously thought. Those consequences simply never had anything to do with the standardized parameter estimates.'' The present study clarifies several issues in the debate between JMP and AUM. One especially important point is that differences in unstandardized estimates from formative and reflective specifications depend on the arbitrary metrics of the latent constructs. Standardizing the constructs to have unit variance facilitates comparison of the structural coefficients across measurement models, but other scalings could be used to make the differences as large or as small as desired. Therefore, ``bias'' in terms of the observed unstandardized coefficients is uninterpretable rather than ``dire.'' The study also shows that because reflective analyses use measure unreliability to disattenuate structural relationships between constructs, reflective specifications can magnify the correlation between other constructs in the model. The amount of magnification varies with the degree of measurement error (or inversely with the degree of reliability). Finally, the study explains why AUM observe a greater discrepancy between formative and reflective specifications when the construct involved is endogenous, which ``increases as the intercorrelation among the formative items increases, a finding not reported in previous research'' (AUM, p. 130). This result is an artefact of the simulation design, which forces uncorrelated indicators in the formative specification to be correlated in the reflective specification. An alternative and more justifiable specification of the formative model in the simulation design would not have produced this result when contrasted with the reflective specification (AUM; Cadogan and Lee 2012; Rigdon 2012).},
}