2020
Koulierakis, Ioannis; Siolas, Georgios; Efthimiou, Eleni; Fotinea, Stavroula-Evita; Stafylopatis, Andreas: Recognition of Static Features in Sign Language Using Key-Points (Conference). Proceedings of the LREC2020 9th Workshop on the Representation and Processing of Sign Languages: Sign Language Resources in the Service of the Language Community, Technological Challenges and Application Perspectives, 2020.
@conference{koulierakis2020recognition,
title = {Recognition of Static Features in Sign Language Using Key-Points},
author = {Ioannis Koulierakis and Georgios Siolas and Eleni Efthimiou and Stavroula-Evita Fotinea and Andreas Stafylopatis},
url = {https://www.aclweb.org/anthology/2020.signlang-1.20/},
year = {2020},
date = {2020-01-01},
booktitle = {Proceedings of the LREC2020 9th Workshop on the Representation and Processing of Sign Languages: Sign Language Resources in the Service of the Language Community, Technological Challenges and Application Perspectives},
pages = {123--126},
abstract = {In this paper we report on a research effort focusing on recognition of static features of sign formation in single sign videos. Three sequential models have been developed for handshape, palm orientation and location of sign formation respectively, which make use of key-points extracted via OpenPose software. The models have been applied to a Danish and a Greek Sign Language dataset, providing results around 96%. Moreover, during the reported research, a method has been developed for identifying the time-frame of real signing in the video, which allows to ignore transition frames during sign recognition processing.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
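The abstract above describes three sequential models over OpenPose-extracted key-points (handshape, palm orientation, and location of sign formation). The paper does not include code; the following is a minimal sketch of one such key-point-based sequential classifier, assuming per-frame 2-D hand key-points in the style of OpenPose's 21-point hand model. The class count, layer sizes, and dummy input are hypothetical and not taken from the paper.

```python
# Minimal sketch (not the authors' code): a sequential classifier over
# per-frame hand key-points, in the spirit of the handshape / palm
# orientation / location models described in the abstract.
# Assumptions: 21 2-D hand key-points per frame (OpenPose hand model),
# hypothetical class count and hyper-parameters.
import torch
import torch.nn as nn


class KeypointSequenceClassifier(nn.Module):
    def __init__(self, num_classes: int, num_keypoints: int = 21,
                 hidden_size: int = 128):
        super().__init__()
        # Each frame is flattened to the (x, y) coordinates of all key-points.
        self.lstm = nn.LSTM(input_size=num_keypoints * 2,
                            hidden_size=hidden_size,
                            num_layers=2,
                            batch_first=True,
                            bidirectional=True)
        self.head = nn.Linear(2 * hidden_size, num_classes)

    def forward(self, keypoints: torch.Tensor) -> torch.Tensor:
        # keypoints: (batch, frames, num_keypoints, 2)
        b, t, k, c = keypoints.shape
        feats, _ = self.lstm(keypoints.reshape(b, t, k * c))
        # Use the last time step as the sequence summary.
        return self.head(feats[:, -1, :])


if __name__ == "__main__":
    # e.g. a handshape classifier over 40 hypothetical handshape classes
    model = KeypointSequenceClassifier(num_classes=40)
    dummy = torch.randn(8, 30, 21, 2)   # 8 clips, 30 frames each
    print(model(dummy).shape)           # torch.Size([8, 40])
```

In practice the same skeleton could be trained separately for each of the three static features, which is consistent with the abstract's description of three independent models, though the actual architectures used in the paper are not specified here.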
Potamianos, Gerasimos; Papadimitriou, Katerina; Efthimiou, Eleni; Fotinea, Stavroula-Evita; Sapountzaki, Galini; Maragos, Petros: SL-ReDu: Greek sign language recognition for educational applications. Project description and early results (Conference). Proceedings of the 13th ACM International Conference on PErvasive Technologies Related to Assistive Environments, 2020.
@conference{potamianos2020sl,
title = {SL-ReDu: Greek sign language recognition for educational applications. Project description and early results},
author = {Gerasimos Potamianos and Katerina Papadimitriou and Eleni Efthimiou and Stavroula-Evita Fotinea and Galini Sapountzaki and Petros Maragos},
url = {https://www.researchgate.net/publication/342537332_SL-ReDu_greek_sign_language_recognition_for_educational_applications_Project_description_and_early_results},
year = {2020},
date = {2020-01-01},
booktitle = {Proceedings of the 13th ACM International Conference on PErvasive Technologies Related to Assistive Environments},
pages = {1--6},
abstract = {We present SL-ReDu, a recently commenced innovative project that aims to exploit deep-learning progress to advance the state-of-the-art in video-based automatic recognition of Greek Sign Language (GSL), while focusing on the use-case of GSL education as a second language. We first briefly overview the project goals, focal areas, and timeline. We then present our initial deep learning-based approach for GSL recognition that employs efficient visual tracking of the signer hands, convolutional neural networks for feature extraction, and attention-based encoder-decoder sequence modeling for sign prediction. Finally, we report experimental results for small-vocabulary, isolated GSL recognition on the single-signer "Polytropon" corpus. To our knowledge, this work constitutes the first application of deep-learning techniques to GSL.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
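The abstract above outlines the early SL-ReDu recognition pipeline: visual tracking of the signer's hands, CNN feature extraction, and attention-based encoder-decoder sequence modeling. The sketch below is not the SL-ReDu implementation; it only illustrates the general idea with a recurrent encoder over per-frame CNN features and a single learned attention query that pools the encoded sequence before isolated-sign classification. The feature dimension, vocabulary size, and hyper-parameters are assumptions.

```python
# Minimal sketch (not the SL-ReDu system): per-frame CNN features followed
# by a recurrent encoder and an attention-based pooling step for isolated
# sign classification. All sizes below are hypothetical.
import torch
import torch.nn as nn


class AttentionSignClassifier(nn.Module):
    def __init__(self, vocab_size: int, feat_dim: int = 512,
                 hidden_size: int = 256):
        super().__init__()
        # Encoder over a sequence of per-frame CNN features
        # (e.g. pooled activations computed on cropped hand regions).
        self.encoder = nn.GRU(feat_dim, hidden_size,
                              batch_first=True, bidirectional=True)
        # A single learned query attends over the encoder outputs.
        self.query = nn.Parameter(torch.randn(2 * hidden_size))
        self.classifier = nn.Linear(2 * hidden_size, vocab_size)

    def forward(self, frame_feats: torch.Tensor) -> torch.Tensor:
        # frame_feats: (batch, frames, feat_dim)
        enc, _ = self.encoder(frame_feats)               # (B, T, 2H)
        scores = enc @ self.query                        # (B, T)
        weights = torch.softmax(scores, dim=1)           # attention weights
        context = (weights.unsqueeze(-1) * enc).sum(1)   # (B, 2H)
        return self.classifier(context)                  # sign logits


if __name__ == "__main__":
    model = AttentionSignClassifier(vocab_size=100)   # hypothetical vocabulary
    feats = torch.randn(4, 40, 512)                   # 4 clips, 40 frames each
    print(model(feats).shape)                         # torch.Size([4, 100])
```

The paper describes a full attention-based encoder-decoder; for isolated, small-vocabulary recognition the decoder reduces to a single prediction step, which is why attention pooling over the encoder states is a reasonable simplification for this sketch.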
Moustris, George; Kardaris, Nikolaos; Tsiami, Antigoni; Chalvatzaki, Georgia; Koutras, Petros; Dometios, Athanasios; Oikonomou, Paris; Tzafestas, Costas; Maragos, Petros; Efthimiou, Eleni; Papageorgiou, Xanthi; Fotinea, Stavroula-Evita; Koumpouros, Yiannis; Vacalopoulou, Anna; Karavasili, Alexandra; Nikolakakis, Alexandros; Karaiskos, Konstantinos; Mavridis, Panagiotis: The i-Walk assistive robot: a multimodal intelligent robotic rollator providing cognitive and mobility assistance to the elderly and motor-impaired (Technical Report). EasyChair, 2020.
@techreport{moustris2020walk,
title = {The i-Walk assistive robot: a multimodal intelligent robotic rollator providing cognitive and mobility assistance to the elderly and motor-impaired},
author = {George Moustris and Nikolaos Kardaris and Antigoni Tsiami and Georgia Chalvatzaki and Petros Koutras and Athanasios Dometios and Paris Oikonomou and Costas Tzafestas and Petros Maragos and Eleni Efthimiou and Xanthi Papageorgiou and Stavroula-Evita Fotinea and Yiannis Koumpouros and Anna Vacalopoulou and Alexandra Karavasili and Alexandros Nikolakakis and Konstantinos Karaiskos and Panagiotis Mavridis},
url = {https://login.easychair.org/publications/preprint/xt3m},
year = {2020},
date = {2020-01-01},
institution = {EasyChair},
abstract = {Robotic rollators can play a significant role as assistive devices for people with impaired movement and mild cognitive deficit. This paper presents an overview of the i-Walk concept: an intelligent robotic rollator offering cognitive and ambulatory assistance to people with light to moderate movement impairment, such as the elderly. We discuss the two robotic prototypes being developed, their various novel functionalities, system architecture, modules and their function scope, and present preliminary experimental results with actual users.},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Papageorgiou, Xanthi; Tsampounaris, George; Karavasili, Alexandra; Efthimiou, Eleni; Fotinea, Stavroula-Evita; Vacalopoulou, Anna; Karioris, Panagiotis; Koureta, Fotini; Alexopoulou, Despina; Dimou, Dimitris: User-centered implementation of rehabilitation exercising on an assistive robotic platform (Conference). International Conference on Human-Computer Interaction, Springer, 2020.
@conference{papageorgiou2020user,
title = {User-centered implementation of rehabilitation exercising on an assistive robotic platform},
author = {Xanthi Papageorgiou and George Tsampounaris and Alexandra Karavasili and Eleni Efthimiou and Stavroula-Evita Fotinea and Anna Vacalopoulou and Panagiotis Karioris and Fotini Koureta and Despina Alexopoulou and Dimitris Dimou},
url = {https://link.springer.com/chapter/10.1007/978-3-030-60149-2_52},
year = {2020},
date = {2020-01-01},
booktitle = {International Conference on Human-Computer Interaction},
pages = {689--698},
organization = {Springer},
abstract = {The paper focuses on the method and steps implementing a suite of rehabilitation exercises on an assistive robotic platform. The suite is based on extensive user needs identification procedures and consultation with medical and rehabilitation experts. For the design of the human-robot interaction (HRI) component of the platform, the user centered approach was adopted, which in this case employed a range of multimodal interaction facilities including a free user-robot dialogue, visual and speech signals.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
2019
Efthimiou, Eleni; Fotinea, Stavroula-Evita; Goulas, Theodoros; Vacalopoulou, Anna; Vasilaki, Kiriaki; Dimou, Athanasia-Lida: Sign language technologies and the critical role of SL resources in view of future internet accessibility services (Journal Article). Technologies, 7 (1), pp. 18, 2019.
@article{efthimiou2019sign,
title = {Sign language technologies and the critical role of SL resources in view of future internet accessibility services},
author = {Eleni Efthimiou and Stavroula-Evita Fotinea and Theodoros Goulas and Anna Vacalopoulou and Kiriaki Vasilaki and Athanasia-Lida Dimou},
url = {https://www.mdpi.com/2227-7080/7/1/18},
year = {2019},
date = {2019-01-01},
journal = {Technologies},
volume = {7},
number = {1},
pages = {18},
publisher = {Multidisciplinary Digital Publishing Institute},
abstract = {In this paper, we touch upon the requirement for accessibility via Sign Language as regards dynamic composition and exchange of new content in the context of natural language-based human interaction, and also the accessibility of web services and electronic content in written text by deaf and hard-of-hearing individuals. In this framework, one key issue remains the option for composition of signed “text”, along with the ability for the reuse of pre-existing signed “text” by exploiting basic editing facilities similar to those available for written text that serve vocal language representation. An equally critical related issue is accessibility of vocal language text by born or early deaf signers, as well as the use of web-based facilities via Sign Language-supported interfaces, taking into account that the majority of native signers present limited reading skills. It is, thus, demonstrated how Sign Language technologies and resources may be integrated in human-centered applications, enabling web services and content accessibility in the education and an everyday communication context, in order to facilitate integration of signer populations in a societal environment that is strongly defined by smart life style conditions. This potential is also demonstrated by end-user-evaluation results.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}