Tsiami, Antigoni; Koutras, Petros; Katsamanis, Athanasios; Vatakis, Argiro; Maragos, Petros
A behaviorally inspired fusion approach for computational audiovisual saliency modeling Journal Article
In: Signal Processing: Image Communication, vol. 76, pp. 186–200, 2019.
@article{tsiami2019behaviorally,
title = {A behaviorally inspired fusion approach for computational audiovisual saliency modeling},
author = {Antigoni Tsiami and Petros Koutras and Athanasios Katsamanis and Argiro Vatakis and Petros Maragos},
year = {2019},
date = {2019-01-01},
journal = {Signal Processing: Image Communication},
volume = {76},
pages = {186--200},
publisher = {Elsevier},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Vatakis, Argiro; Pastra, Katerina
A multimodal dataset of spontaneous speech and movement production on object affordances Journal Article
In: Scientific Data, vol. 3, no. 150078, 2016.
@article{Pastra&Vatakis2016,
title = {A multimodal dataset of spontaneous speech and movement production on object affordances},
author = {Argiro Vatakis and Katerina Pastra},
url = {http://www.nature.com/articles/sdata201578},
doi = {10.1038/sdata.2015.78},
year = {2016},
date = {2016-01-01},
journal = {Scientific Data},
volume = {3},
number = {150078},
abstract = {In the longstanding effort of defining object affordances, a number of resources have been developed on objects and associated knowledge. These resources, however, have limited potential for modeling and generalization mainly due to the restricted, stimulus-bound data collection methodologies adopted. To date, therefore, there exists no resource that truly captures object affordances in a direct, multimodal, and naturalistic way. Here, we present the first such resource of ‘thinking aloud’, spontaneously-generated verbal and motoric data on object affordances. This resource was developed from the reports of 124 participants divided into three behavioural experiments with visuo-tactile stimulation, which were captured audiovisually from two camera-views (frontal/profile). This methodology allowed the acquisition of approximately 95 hours of video, audio, and text data covering: object-feature-action data (e.g., perceptual features, namings, functions), Exploratory Acts (haptic manipulation for feature acquisition/verification), gestures and demonstrations for object/feature/action description, and reasoning patterns (e.g., justifications, analogies) for attributing a given characterization. The wealth and content of the data make this corpus a one-of-a-kind resource for the study and modeling of object affordances.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Vatakis, Argiro; Pastra, Katerina; Dimitrakis, Panagiotis
Acquisition of object knowledge through Exploratory Acts Proceedings Article
In: 15th International Multisensory Research Forum (IMRF), Amsterdam, NL, 2014.
@inproceedings{vatakis2014acquisition,
title = {Acquisition of object knowledge through Exploratory Acts},
author = {Argiro Vatakis and Katerina Pastra and Panagiotis Dimitrakis},
year = {2014},
date = {2014-06-01},
urldate = {2014-06-01},
booktitle = {15th International Multisensory Research Forum (IMRF)},
address = {Amsterdam, NL},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Wallraven, C; Schultze, M; Mohler, B; Vatakis, Argiro; Pastra, Katerina
The POETICON enacted scenario corpus - a tool for human and computational experiments on action understanding Proceedings Article
In: Proceedings of the 9th IEEE Conference on Automatic Face and Gesture Recognition (FG'11), Santa Barbara, CA, 2011.
@inproceedings{Pastra2011c,
title = {The POETICON enacted scenario corpus - a tool for human and computational experiments on action understanding},
author = {C Wallraven and M Schultze and B Mohler and Argiro Vatakis and Katerina Pastra},
url = {http://poeticoncorpus.kyb.mpg.de/pub/fg2011.pdf},
year = {2011},
date = {2011-03-01},
booktitle = {Proceedings of the 9th IEEE Conference on Automatic Face and Gesture Recognition (FG'11)},
address = {Santa Barbara, CA},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Pastra, Katerina; Wallraven, C; Schultze, M; Vatakis, Argiro; Kaulard, K
The POETICON Corpus: Capturing Language Use and Sensorimotor Experience in Everyday Interaction Proceedings Article
In: Maegaard, Bente; Mariani, Joseph; Odijk, Jan; Piperidis, Stelios; Rosner, Mike; Tapias, Daniel; Calzolari, Nicoletta; Choukri, Khalid (Ed.): Proceedings of the Seventh conference on International Language Resources and Evaluation (LREC'10), European Language Resources Association (ELRA), 2010, ISBN: 2-9517408-6-7.
@inproceedings{Pastraetal.2010,
title = {The POETICON Corpus: Capturing Language Use and Sensorimotor Experience in Everyday Interaction},
author = {Katerina Pastra and C Wallraven and M Schultze and Argiro Vatakis and K Kaulard},
editor = {Bente Maegaard and Joseph Mariani and Jan Odijk and Stelios Piperidis and Mike Rosner and Daniel Tapias and Nicoletta Calzolari and Khalid Choukri},
url = {http://www.lrec-conf.org/proceedings/lrec2010/pdf/778_Paper.pdf},
isbn = {2-9517408-6-7},
year = {2010},
date = {2010-05-01},
booktitle = {Proceedings of the Seventh conference on International Language Resources and Evaluation (LREC'10)},
publisher = {European Language Resources Association (ELRA)},
abstract = {Natural language use, acquisition, and understanding usually take place in multisensory and multimedia communication environments. Therefore, to model language in its interaction and integration with sensorimotor experiences, one needs a representative corpus of such interplay. In this paper, we present the first corpus of language use and sensorimotor experience recordings in everyday human:human interaction, in which spontaneous language communication has been recorded along with corresponding multi-view video recordings, recordings of 3D full-body kinematics, and 3D tracking of objects in focus. It is a twelve-hour corpus comprising six everyday human:human interaction scenes, each one performed 3 times by 4 different English-speaking couples (interaction between a male and a female actor), each couple acting each scene in two settings: a fully naturalistic setting in which 5-camera multi-view video recordings take place, and a high-tech setting with full-body motion capture for both individuals, a 2-camera multi-view video recording, and 3D tracking of focus objects. The corpus has been developed within an EU-funded cognitive systems research project, POETICON (http://www.poeticon.eu), and represents a new type of language resource for cognitive systems: namely, a corpus that reveals the dynamic role of language in its interplay with sensorimotor experiences and allows one to model this interplay computationally.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}