@incollection{craven2004gesrec3d,
  author            = {Craven, Michael P. and Curtis, K. Mervyn},
  editor            = {Camurri, Antonio and Volpe, Gualtiero},
  title             = {{GesRec3D}: A Real-Time Coded Gesture-to-Speech System with Automatic Segmentation and Recognition Thresholding Using Dissimilarity Measures},
  booktitle         = {Gesture-Based Communication in Human-Computer Interaction},
  series            = {Lecture Notes in Computer Science},
  volume            = {2915},
  publisher         = {Springer},
  address           = {Berlin, Heidelberg},
  year              = {2004},
  doi               = {10.1007/978-3-540-24598-8_21},
  isbn              = {9783540210726},
  url               = {https://nottingham-repository.worktribe.com/output/1021327},
  keywords          = {gesture recognition, dissimilarity, similarity, segmentation, text-to-speech, gesture-to-speech, sign language, 3D tracking, Augmentative and Alternative Communication, AAC, human computer interaction, HCI},
  abstract          = {A complete microcomputer system is described, GesRec3D, which facilitates the data acquisition, segmentation, learning, and recognition of 3-Dimensional arm gestures, with application as a Augmentative and Alternative Communication (AAC) aid for people with motor and speech disability. The gesture data is acquired from a Polhemus electro-magnetic tracker system, with sensors attached to the finger, wrist and elbow of one arm. Coded gestures are linked to user-defined text, to be spoken by a text-to-speech engine that is integrated into the system. A segmentation method and an algorithm for classification are presented that includes acceptance/rejection thresholds based on intra-class and inter-class dissimilarity measures. Results of recognition hits, confusion misses and rejection misses are given for two experiments, involving predefined and arbitrary 3D gestures.},
  publicationstatus = {Published},
  internal-note     = {Entry type, booktitle, series, and publisher inferred from DOI 10.1007/978-3-540-24598-8_21 and ISBN prefix 978-3-540 (Springer LNCS vol. 2915; original entry had issue = {2915}) -- verify against the published chapter; pages not available in the source record},
}