@comment{references.bib — survey-methodology reference database.}
@book{Weisberg2005,
  abstract = {In recent decades, the survey field has been revolutionized with increased attention to both its basis in social psychology and its statistical underpin- nings. The “total survey error” approach provides a new paradigm by which to understand and study this field, so it is time to proclaim the true arrival of survey research as a new science. This book uses the total sur- vey error approach to present a unified approach to understanding the components of good survey research. Survey research is an interesting research approach because the trade-offs between theory and practical considerations are quite direct. This has always been evident in survey sampling, where sampling statis- ticians learned early on to balance statistical theory with the difficulty of sampling in the real world. The initial knowledge of question writing was developed from practical interviewing, but in recent years the under- standing of this topic has become more theoretically informed through insights from social psychology. The chapters in this book reflect the current balance between theory and practice, statistics and cognitive psychology.},
  author = {Weisberg, Herbert F.},
  address = {Chicago and London},
  doi = {10.7208/chicago/9780226891293.001.0001},
  isbn = {9780226891286},
  month = feb,
  publisher = {University of Chicago Press},
  title = {The Total Survey Error Approach},
  url = {http://www.bibliovault.org/BV.landing.epl?ISBN=9780226891286},
  year = {2005},
}
@report{StatCan2019,
  author = {{Statistics Canada}},
  institution = {{Statistics Canada}},
  issn = {1708-6256},
  title = {Statistics {Canada} Quality Guidelines, Sixth Edition},
  url = {https://www150.statcan.gc.ca/n1/pub/12-539-x/12-539-x2019001-eng.pdf},
  year = {2019},
}
@article{Seo2014,
  abstract = {Purpose: To examine the influence of translation when measuring and comparing self-rated health (SRH) measured with five response categories (excellent, very good, good, fair, and poor), across racial/ethnic groups. Methods: Using data from the California Health Interview Survey, which were administered in five languages, we analyzed variations in the five-category SRH across five racial/ethnic groups: non-Hispanic white, Latino, Chinese, Vietnamese, and Korean. Logistic regression was used to estimate independent effects of race/ethnicity, culture, and translation on SRH, after controlling for risk factors and other measures of health status. Results: Latinos, Chinese, Vietnamese, and Koreans were less likely than non-Hispanic whites to rate their health as excellent or very good and more likely to rate it as good, fair, or poor. This racial/ethnic difference diminished when adjusting for acculturation. Independently of race/ethnicity, respondents using non-English surveys were less likely to answer excellent (OR = 0.24-0.55) and very good (OR = 0.30-0.34) and were more likely to answer fair (OR = 2.48-4.10) or poor (OR = 2.87-3.51), even after controlling for other measures of SRH. Conclusions: Responses to the five-category SRH question depend on interview language. When responding in Spanish, Chinese, Korean, or Vietnamese, respondents are more likely to choose a lower level SRH category, "fair" in particular. If each SRH category measured in different languages is treated as equivalent, racial/ethnic disparities in SRH among Latinos and Asian subgroups, as compared to non-Hispanic whites, may be exaggerated. © 2013 Springer Science+Business Media Dordrecht.},
  author = {Seo, Sukyong and Chung, Sukyung and Shumway, Martha},
  doi = {10.1007/s11136-013-0522-6},
  issn = {09629343},
  number = {2},
  journal = {Quality of Life Research},
  keywords = {Ethnicity,Public health,Self-rated health,Translation},
  month = mar,
  pages = {593--600},
  pmid = {24026633},
  publisher = {Springer},
  title = {How good is ``very good''? {Translation} effect in the racial/ethnic variation in self-rated health status},
  volume = {23},
  url = {https://link.springer.com/article/10.1007/s11136-013-0522-6},
  year = {2014},
}
@report{Pan,
  abstract = {The Census Bureau developed guidelines for the translation of data collection instruments and supporting materials in order to ensure that such documents translated from a source language into a target language are reliable, complete, accurate, and culturally appropriate. In addition to meeting these criteria, guidelines were developed to ensure that translated Census Bureau data collection instruments also have semantic, conceptual, and normative equivalence. The guideline recommends that the translation of data collection instruments from a source language into a target language be conducted using a translation team. The guideline relies on the cross-cultural and survey methodology research literature and specifies and describes five steps that comprise the translation process. These steps are: Prepare, Translate, Pretest, Revise, and Document.},
  author = {Pan, Yuling and de la Puente, Manuel},
  address = {Washington D.C.},
  institution = {Statistical Research Division, U.S. Bureau of the Census},
  type = {Research Report Series},
  number = {6},
  keywords = {cross-cultural issues in survey design,data quality,non-English speaking populations,survey translation},
  pages = {1--38},
  title = {Census {Bureau} guideline for the translation of data collection instruments and supporting materials: documentation on how the guideline was developed},
  url = {https://www.census.gov/srd/papers/pdf/rsm2005-06.pdf},
  year = {2005},
}
@book{Juran1980,
  author = {Juran, Joseph and Gryna, Frank},
  address = {New York},
  edition = {Second},
  publisher = {McGraw-Hill},
  title = {Quality Planning and Analysis},
  year = {1980},
}
@article{Hicks2010,
  abstract = {Computer audio-recorded interviewing (CARI) has been used for more than a decade to detect field-interview fabrication, but it has the potential for much more. On the 2007 National Home and Hospice Care Survey, an innovative CARI system combined with behavior coding assessed problematic questions, monitored general compliance with protocols, and evaluated individual interviewer performance. This article discusses the benefits of CARI for recording large samples of interviews in a systematic, objective manner at reduced cost and less operational burden than traditional audio recording. The increased objectivity and volume of recordings allowed identification of potential data-quality issues, specifically measurement error due to the questionnaire, the interviewer, and their interaction. CARI's potential as a tool for estimating the magnitude of the error in survey data is also discussed. © The Author 2011.},
  author = {Hicks, W. D. and Edwards, B. and Tourangeau, K. and McBride, B. and Harris-Kojetin, L. D. and Moss, A. J.},
  doi = {10.1093/poq/nfq063},
  issn = {0033-362X},
  number = {5},
  journal = {Public Opinion Quarterly},
  month = jan,
  pages = {985--1003},
  publisher = {Oxford Academic},
  title = {Using {CARI} Tools to Understand Measurement Error},
  volume = {74},
  url = {https://academic.oup.com/poq/article-lookup/doi/10.1093/poq/nfq063},
  year = {2010},
}
@article{Groves2010,
  author = {Groves, Robert M. and Lyberg, Lars},
  doi = {10.1093/poq/nfq065},
  number = {5},
  journal = {Public Opinion Quarterly},
  pages = {849--879},
  title = {Total Survey Error: Past, Present, and Future},
  volume = {74},
  year = {2010},
}
@book{Groves2004,
  author = {Groves, Robert M. and Fowler, Jr., Floyd J. and Couper, Mick P. and Lepkowski, James M. and Singer, Eleanor and Tourangeau, Roger},
  address = {New York},
  isbn = {1118211340},
  publisher = {John Wiley \& Sons},
  title = {Survey Methodology},
  year = {2004},
}
@article{Groves2001,
  abstract = {A theory of survey participation suggests that sample individuals engage in more thorough cognitive processing of the survey request when their concerns about the request are addressed by the interviewer. When the concerns are satisfactorily addressed, the interview becomes a more attractive option; when they are not, a refusal tends to occur. This theory has implications for the training of interviewers in recruiting sample individuals to be respondents. A training regimen was constructed that assembled concerns perceived by senior interviewers to be common, taught trainees to classify concerns (using the terminology of the respondents) into themes, taught trainees facts to communicate regarding those concerns, and drilled the trainees in rapid, natural delivery of those facts using terminology compatible with that of the sample person. Two experimental tests of the training regimen show increases in cooperation rates for interviewers who receive the training.},
  author = {Groves, Robert M. and McGonagle, Katherine A.},
  number = {2},
  journal = {Journal of Official Statistics},
  keywords = {Nonresponse,interviewer training},
  pages = {249--265},
  title = {A Theory-Guided Interviewer Training Protocol Regarding Survey Participation},
  volume = {17},
  year = {2001},
}
@report{EuropeanSocialSurvey2018,
  author = {{European Social Survey}},
  address = {London},
  institution = {{ESS ERIC Headquarters}},
  title = {{ESS} Round 9 Translation Guidelines},
  url = {https://www.europeansocialsurvey.org/docs/round9/methods/ESS9_translation_guidelines.pdf},
  urldate = {2020-08-03},
  year = {2018},
}
@article{Biemer2010,
  abstract = {The total survey error (TSE) paradigm provides a theoretical framework for optimizing surveys by maximizing data quality within budgetary constraints. In this article, the TSE paradigm is viewed as part of a much larger design strategy that seeks to optimize surveys by maximizing total survey quality; i.e., quality more broadly defined to include user-specified dimensions of quality. Survey methodology, viewed within this larger framework, alters our perspectives on the survey design, implementation, and evaluation. As an example, although a major objective of survey design is to maximize accuracy subject to costs and timeliness constraints, the survey budget must also accommodate additional objectives related to relevance, accessibility, interpretability, comparability, coherence, and completeness that are critical to a survey's "fitness for use." The article considers how the total survey quality approach can be extended beyond survey design to include survey implementation and evaluation. In doing so, the "fitness for use" perspective is shown to influence decisions regarding how to reduce survey error during design implementation and what sources of error should be evaluated in order to assess the survey quality today and to prepare for the surveys of the future. © The Author 2011.},
  author = {Biemer, Paul P.},
  doi = {10.1093/poq/nfq058},
  issn = {0033-362X},
  number = {5},
  journal = {Public Opinion Quarterly},
  month = jan,
  pages = {817--848},
  publisher = {Oxford Academic},
  title = {Total Survey Error: Design, Implementation, and Evaluation},
  volume = {74},
  url = {https://academic.oup.com/poq/article/74/5/817/1815551},
  year = {2010},
}
@book{Biemer2003,
  abstract = {Peruse the history of survey research and the essential concepts for data quality. With an emphasis on total survey error, the authors review principles and concepts in the field and examine important unresolved issues in survey methods. Spanning a range of topics dealing with the quality of data collected through the survey process, they focus on such key issues as:Major sources of survey error, examining the origins of each error source most successful methods for reducing errors from those sourcesMethods most often used in practice for evaluating the effects of the source on total survey errorImplications of improving survey quality for organizational management and costs},
  author = {Biemer, Paul P. and Lyberg, Lars E.},
  address = {Hoboken, NJ, USA},
  doi = {10.1002/0471458740},
  isbn = {0471193755},
  month = feb,
  publisher = {John Wiley \& Sons, Inc.},
  title = {Introduction to Survey Quality},
  url = {http://doi.wiley.com/10.1002/0471458740},
  year = {2003},
}
@article{Ackermann-Piek2020,
  abstract = {Typically, interviewer training is implemented in order to minimize interviewer effects and ensure that interviewers are well prepared to administer the survey. Leading professional associations in the survey research landscape recommend the standardized implementation of interviewer training. Some large-scale multinational survey programs have produced their own training guidelines to ensure a comparable level of quality in the implementation of training across participating countries. However, the length, content, and methodology of interviewer training guidelines are very heterogeneous. In this paper, we provide a comparative overview of general and study-specific interviewer training guidelines of three multinational survey programs (ESS, PIAAC, SHARE). Using total survey error (TSE) as a conceptual framework, we map the general and study-specific training guidelines of the three multinational survey programs to components of the TSE to determine how they target the reduction of interviewer effects. Our results reveal that unit nonresponse error is covered by all guidelines; measurement error is covered by most guidelines; and coverage error, sampling error, and processing error are addressed either not at all or sparsely. We conclude, for example, that these guidelines could be an excellent starting point for new – small as well as large-scale – surveys to design their interviewer training, and that interviewer training guidelines should be made publicly available in order to provide a high level of transparency, thus enabling survey programs to learn from each other.},
  author = {Ackermann-Piek, Daniela and Silber, Henning and Daikeler, Jessica and Martin, Silke and Edwards, Brad},
  doi = {10.12758/mda.2020.01},
  issn = {21904936},
  number = {1},
  journal = {Methods, Data, Analyses},
  keywords = {Interviewer effects,Interviewer training guidelines,Multinational survey programs,Total survey error},
  pages = {35--60},
  title = {Interviewer training guidelines of multinational survey programs: A total survey error perspective},
  volume = {14},
  year = {2020},
}