@inproceedings{sagonas2013semi,
  title = {A semi-automatic methodology for facial landmark annotation},
  abstract = {Developing powerful deformable face models requires massive, annotated face databases on which techniques can be trained, validated and tested. Manual annotation of each facial image in terms of landmarks requires a trained expert, and the workload is usually enormous. Fatigue is one of the reasons that annotations are, in some cases, inaccurate. This is why the majority of existing facial databases provide annotations for only a relatively small subset of the training images. Furthermore, there is hardly any correspondence between the annotated landmarks across different databases. These problems make cross-database experiments almost infeasible. To overcome these difficulties, we propose a semi-automatic annotation methodology for annotating massive face datasets. This is the first attempt to create a tool suitable for annotating massive facial databases. We employed our tool to create annotations for the MultiPIE, XM2VTS, AR, and FRGC Ver. 2 databases. The annotations will be made publicly available from http://ibug.doc.ic.ac.uk/resources/facial-point-annotations/. Finally, we present experiments which verify the accuracy of the produced annotations.},
  booktitle = {2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)},
  note = {Copyright Statement: Copyright information regarding this work can be found at the following address: http://eprints.nottingham.ac.uk/end\_user\_agreement.pdf},
  address = {Portland, Oregon, USA},
  publicationstatus = {Published},
  publisher = {Institute of Electrical and Electronics Engineers},
  url = {https://nottingham-repository.worktribe.com/output/1002059},
  keywords = {Face recognition, Image retrieval, Visual databases},
  year = {2013},
  author = {Sagonas, Christos and Tzimiropoulos, Georgios and Zafeiriou, Stefanos and Pantic, Maja}
}