@inproceedings{MOB-LREC2016,
  author    = {Fotinea, Stavroula-Evita and Efthimiou, Eleni and Koutsombogera, Maria and Dimou, Athanasia-Lida and Goulas, Theodoros and Vasilaki, Kiriaki},
  title     = {{Multimodal} {Resources} for {Human-Robot} {Communication} {Modelling}},
  booktitle = {Proceedings of the Tenth International Conference on Language Resources and Evaluation ({LREC} 2016)},
  editor    = {Calzolari, N. and Choukri, K. and Declerck, T. and Goggi, S. and Grobelnik, M. and Maegaard, B. and Mariani, J. and Mazo, H. and Moreno, A. and Odijk, J. and Piperidis, S.},
  publisher = {European Language Resources Association (ELRA)},
  month     = may,
  year      = {2016},
  pages     = {3455--3460},
  isbn      = {978-2-9517408-9-1},
  url       = {http://www.lrec-conf.org/proceedings/lrec2016/pdf/452_Paper.pdf},
  keywords  = {multisensory data acquisition, multimodal semantics, multimodal annotation scheme, sign language, multimodal HRI model extraction, multimodal human-robot communication, natural HR},
  abstract  = {This paper reports on work related to the modelling of Human-Robot Communication on the basis of multimodal and multisensory human behaviour analysis. A primary focus in this framework of analysis is the definition of semantics of human actions in interaction, their capture and their representation in terms of behavioural patterns that, in turn, feed a multimodal human-robot communication system. Semantic analysis encompasses both oral and sign languages, as well as both verbal and non-verbal communicative signals to achieve an effective, natural interaction between elderly users with slight walking and cognitive inability and an assistive robotic platform.},
}