@conference{Athanaselisetal2006,
  author    = "Athanaselis, Theologos and Bakamidis, Stylianos and Dologlou, Ioannis",
  title     = "{R}ecognising {V}erbal {C}ontent of {E}motionally {C}oloured {S}peech",
  booktitle = "Proceedings of the Transactions on Engineering, Computing and Technology",
  address   = "Vienna, Austria",
  volume    = "12",
  pages     = "274--277",
  year      = "2006",
  note      = "ICCS2006",
  keywords  = "verbal content of emotional speech; Automatic Speech Recognition (ASR); read speech and non-read speech",
  abstract  = "Recognising the verbal content of emotional speech is a difficult problem, and the recognition rates reported in the literature are low. Although knowledge in the area has been developing rapidly, it remains limited in fundamental ways. The first issue is that only a small part of the spectrum of emotionally coloured expressions has been studied. The second is that most research on speech and emotion has focused on recognising the emotion being expressed rather than on the classic Automatic Speech Recognition (ASR) problem of recovering the verbal content of the speech. Read speech and non-read speech in a ‘careful’ style can be recognised with accuracy higher than 95% using state-of-the-art speech recognition technology. Including information about prosody improves recognition rates for emotions simulated by actors, but its relevance to the freer patterns of spontaneous speech is unproven. This paper shows that the recognition rate for emotionally coloured speech can be improved by using a language model based on an increased representation of emotional utterances.",
}