Papageorgiou, Xanthi; Tzafestas, Costas; Maragos, Petros; Pavlakos, Georgios; Chalvatzaki, Georgia; Moustris, George; Kokkinos, Iasonas; Peer, Angelika; Stanczyk, Bartlomiej; Fotinea, Stavroula-Evita; others,
Advances in intelligent mobility assistance robot integrating multimodal sensory processing Proceedings Article
In: International Conference on Universal Access in Human-Computer Interaction, pp. 692–703, Springer, 2014.
@inproceedings{papageorgiou2014advances,
title = {Advances in intelligent mobility assistance robot integrating multimodal sensory processing},
author = {Xanthi Papageorgiou and Costas Tzafestas and Petros Maragos and Georgios Pavlakos and Georgia Chalvatzaki and George Moustris and Iasonas Kokkinos and Angelika Peer and Bartlomiej Stanczyk and Stavroula-Evita Fotinea and others},
url = {https://link.springer.com/chapter/10.1007/978-3-319-07446-7_66},
year = {2014},
date = {2014-01-01},
booktitle = {International Conference on Universal Access in Human-Computer Interaction},
pages = {692--703},
organization = {Springer},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Fotinea, Stavroula-Evita; Efthimiou, Eleni; Dimou, Athanasia-Lida; Goulas, Theodoros; Karioris, Panagiotis; Peer, Angelika; Maragos, Petros; Tzafestas, Costas; Kokkinos, Iasonas; Hauer, Klaus; others,
Data acquisition towards defining a multimodal interaction model for human–assistive robot communication Proceedings Article
In: International Conference on Universal Access in Human-Computer Interaction, pp. 613–624, Springer, 2014.
@inproceedings{fotinea2014data,
title = {Data acquisition towards defining a multimodal interaction model for human--assistive robot communication},
author = {Stavroula-Evita Fotinea and Eleni Efthimiou and Athanasia-Lida Dimou and Theodoros Goulas and Panagiotis Karioris and Angelika Peer and Petros Maragos and Costas Tzafestas and Iasonas Kokkinos and Klaus Hauer and others},
url = {https://link.springer.com/chapter/10.1007/978-3-319-07446-7_59},
year = {2014},
date = {2014-01-01},
booktitle = {International Conference on Universal Access in Human-Computer Interaction},
pages = {613--624},
organization = {Springer},
abstract = {We report on the procedures followed in order to acquire a multimodal sensory corpus that will become the primary source of data retrieval, data analysis and testing of mobility assistive robot prototypes in the European project MOBOT. Analysis of the same corpus with respect to all sensory data will lead to the definition of the multimodal interaction model; gesture and audio data analysis is foreseen to be integrated into the platform in order to facilitate the communication channel between end users and the assistive robot prototypes expected to be the project’s outcomes. To convey the whole range of sensory data acquired, we describe the data acquisition scenarios followed to obtain the required multisensory data, as well as the initial post-processing outcomes currently available.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Fotinea, Stavroula-Evita; Efthimiou, Eleni; Caridakis, George; Diamanti, O.; Mitsiou, N.; Tzafestas, Costas; Maragos, Petros
DIANOEMA: Visual analysis and sign recognition for GSL modelling and robot teleoperation Proceedings Article
In: Proc. of the 8th International Gesture Workshop, Bielefeld, Germany, 2009.
@inproceedings{Foetal2009,
title = {DIANOEMA: Visual analysis and sign recognition for GSL modelling and robot teleoperation},
author = {Stavroula-Evita Fotinea and Eleni Efthimiou and George Caridakis and O. Diamanti and N. Mitsiou and Costas Tzafestas and Petros Maragos},
url = {https://www.techfak.uni-bielefeld.de/ags/wbski/GW2009/page6/abstracts/FotineaEA.pdf},
year = {2009},
date = {2009-02-01},
booktitle = {Proc. of the 8th International Gesture Workshop},
address = {Bielefeld, Germany},
abstract = {Here we present research work performed in the framework of the Greek national project DIANOEMA (GSRT, M3.3, id 35), focusing on the following activities: i) Development of innovative image analysis and computer vision algorithms for the effective visual analysis of video sequences, aiming at sign detection and tracking; ii) Creation of a video-corpus of the Greek Sign Language (GSL) and annotation and modelling of an indicative subset of it; iii) Automatic recognition of indicative categories of GSL gestures using computer vision systems pre-trained on the GSL corpus, combining AI techniques, machine learning and probabilistic analysis for the estimation of gesture instantiations; iv) Integration of the above into a pilot application system of robot tele-operation, on the basis of a pre-defined vocabulary of simple signs for tele-operation control.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Tzafestas, Costas; Mitsou, N.; Georgakarakos, N.; Diamanti, O.; Maragos, Petros; Fotinea, Stavroula-Evita
Gestural teleoperation of a mobile robot based on visual recognition of sign language static handshapes Proceedings Article
In: Proc. of the 18th IEEE International Symposium on Robot and Human Interactive Communication, Japan, 2009.
@inproceedings{Tzafestasetal2009,
title = {Gestural teleoperation of a mobile robot based on visual recognition of sign language static handshapes},
author = {Costas Tzafestas and N. Mitsou and N. Georgakarakos and O. Diamanti and Petros Maragos and Stavroula-Evita Fotinea},
url = {https://www.researchgate.net/publication/224079158_Gestural_teleoperation_of_a_mobile_robot_based_on_visual_recognition_of_sign_language_static_handshapes},
year = {2009},
date = {2009-09-01},
booktitle = {Proc. of the 18th IEEE International Symposium on Robot and Human Interactive Communication},
address = {Japan},
abstract = {This paper presents results achieved within the framework of the DIANOEMA research project, in which visual analysis and sign recognition techniques have been explored on Greek Sign Language (GSL) data, aiming, besides GSL modelling, at a pilot application in mobile robot teleoperation. A small vocabulary of hand signs is designed to enable desktop-based teleoperation at a high level of supervisory telerobotic control. Real-time visual recognition of the hand images is performed by training a multi-layer perceptron (MLP) neural network. Various shape descriptors of the segmented hand posture images have been explored as inputs to the MLP network. These include Fourier shape descriptors on the contour of the segmented hand sign images, moments, compactness, eccentricity, and the histogram of the curvature. It is examined which of these shape descriptors are best suited for real-time recognition of hand signs, in relation to the number and choice of hand postures, in order to achieve maximum recognition performance. The hand-sign recognizer has been integrated into a graphical user interface and has been successfully employed in a pilot application for real-time desktop-based gestural teleoperation of a mobile robot vehicle.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
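The abstract above outlines a concrete recognition pipeline: shape descriptors (Fourier coefficients of the hand contour, moments, compactness, eccentricity, curvature histogram) are computed on segmented hand images and fed to an MLP classifier. The following minimal sketch illustrates that idea in Python; it is not the authors' code. Hand segmentation is assumed to happen upstream, and the toy circle/square contours merely stand in for two segmented handshape classes.

# Illustrative sketch only -- not the DIANOEMA implementation. Fourier shape
# descriptors of a closed contour are fed to an MLP classifier, mirroring the
# pipeline the abstract describes. Segmentation is assumed done upstream.
import numpy as np
from sklearn.neural_network import MLPClassifier

def fourier_descriptors(contour, n_coeffs=16):
    """Translation-, scale- and rotation-invariant Fourier descriptors of a
    closed contour given as an (N, 2) array of (x, y) boundary points."""
    z = contour[:, 0] + 1j * contour[:, 1]  # boundary as a complex signal
    coeffs = np.fft.fft(z)[1:]              # drop DC term: translation invariance
    mags = np.abs(coeffs)                   # magnitudes only: rotation invariance
    return mags[:n_coeffs] / mags[0]        # normalize by |F1|: scale invariance

def circle(n=128):
    t = np.linspace(0.0, 2.0 * np.pi, n, endpoint=False)
    return np.c_[np.cos(t), np.sin(t)]

def square(n=128):
    s = np.linspace(-1.0, 1.0, n // 4, endpoint=False)
    one = np.ones_like(s)
    return np.concatenate([np.c_[s, -one], np.c_[one, s],
                           np.c_[-s, one], np.c_[-one, -s]])

# Toy data: jittered circles vs. squares stand in for two handshape classes.
rng = np.random.default_rng(0)
X, y = [], []
for label, shape in [(0, circle()), (1, square())]:
    for _ in range(100):
        noisy = shape * rng.uniform(0.5, 2.0) + rng.normal(0.0, 0.02, shape.shape)
        X.append(fourier_descriptors(noisy))
        y.append(label)
X, y = np.array(X), np.array(y)

clf = MLPClassifier(hidden_layer_sizes=(32,), max_iter=2000, random_state=0)
clf.fit(X[::2], y[::2])                                 # train on every other sample
print("held-out accuracy:", clf.score(X[1::2], y[1::2]))  # test on the rest

In a teleoperation setting along the lines of the paper, each recognized class would then be mapped to a high-level robot command; the descriptor choice (Fourier coefficients vs. moments, compactness, etc.) is exactly the design variable the paper evaluates.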
Koumpouros, Giannis; Karavasili, Alexandra; Maragos, Petros; Tzafestas, Costas; Fotinea, Stavroula-Evita; Efthimiou, Eleni; Papastamatiou, Nikolaos; Nikolakakis, Alexandros; Papageorgiou, Effie
Assessment of an intelligent robotic rehabilitation assistant Journal Article
@article{koumpourosassessment,
title = {Assessment of an intelligent robotic rehabilitation assistant},
author = {Giannis Koumpouros and Alexandra Karavasili and Petros Maragos and Costas Tzafestas and Stavroula-Evita Fotinea and Eleni Efthimiou and Nikolaos Papastamatiou and Alexandros Nikolakakis and Effie Papageorgiou},
keywords = {},
pubstate = {published},
tppubtype = {article}
}