@article{foody2014rating,
  title = {Rating crowdsourced annotations: evaluating contributions of variable quality and completeness},
  abstract = {Crowdsourcing has become a popular means to acquire data about the Earth and its environment inexpensively, but the data sets obtained are typically imperfect and of unknown quality. Two common imperfections with crowdsourced data are contributions from cheats or spammers and missing cases. The effect of these two imperfections on a method to evaluate the accuracy of crowdsourced data via a latent class model was explored. Using simulated and real data sets, it was shown that the method is able to derive useful information on the accuracy of crowdsourced data even when the degree of imperfection was very high. The practical potential of this ability to obtain accuracy information within the geospatial sciences and the realm of Digital Earth applications was indicated with reference to an evaluation of building damage maps produced by multiple bodies after the 2010 earthquake in Haiti. Critically, the method allowed data sets to be ranked in approximately the correct order of accuracy, which could help ensure that the most appropriate data sets are used.},
  doi = {10.1080/17538947.2013.839008},
  eissn = {1753-8955},
  issn = {1753-8947},
  issue = {8},
  journal = {International Journal of Digital Earth},
  pages = {650-670},
  publicationstatus = {Published},
  publisher = {Taylor \& Francis Open},
  url = {https://nottingham-repository.worktribe.com/output/719379},
  volume = {7},
  year = {2014},
  author = {Foody, Giles M.}
}