Zlatintsi, Athanasia; Koutras, Petros; Evangelopoulos, Georgios; Malandrakis, Nikolaos; Efthymiou, Niki; Pastra, Katerina; Potamianos, Alexandros; Maragos, Petros: COGNIMUSE: a multimodal video database annotated with saliency, events, semantics and emotion with application to summarization (Journal Article). EURASIP Journal on Image and Video Processing, 2017 (1), pp. 54, 2017.
@article{zlatintsi2017cognimuse,
title = {COGNIMUSE: a multimodal video database annotated with saliency, events, semantics and emotion with application to summarization},
author = {Athanasia Zlatintsi and Petros Koutras and Georgios Evangelopoulos and Nikolaos Malandrakis and Niki Efthymiou and Katerina Pastra and Alexandros Potamianos and Petros Maragos},
year = {2017},
date = {2017-01-01},
journal = {EURASIP Journal on Image and Video Processing},
volume = {2017},
number = {1},
pages = {54},
publisher = {Springer},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Vatakis, Argiro; Pastra, Katerina: A multimodal dataset of spontaneous speech and movement production on object affordances (Journal Article). Scientific Data, 3 (150078), 2016.
@article{Pastra&Vatakis2016,
title = {A multimodal dataset of spontaneous speech and movement production on object affordances},
author = {Argiro Vatakis and Katerina Pastra},
url = {http://www.nature.com/articles/sdata201578},
doi = {10.1038/sdata.2015.78},
year = {2016},
date = {2016-01-01},
journal = {Scientific Data},
volume = {3},
number = {150078},
abstract = {In the longstanding effort of defining object affordances, a number of resources have been developed on objects and associated knowledge. These resources, however, have limited potential for modeling and generalization mainly due to the restricted, stimulus-bound data collection methodologies adopted. To-date, therefore, there exists no resource that truly captures object affordances in a direct, multimodal, and naturalistic way. Here, we present the first such resource of ‘thinking aloud’, spontaneously-generated verbal and motoric data on object affordances. This resource was developed from the reports of 124 participants divided into three behavioural experiments with visuo-tactile stimulation, which were captured audiovisually from two camera-views (frontal/profile). This methodology allowed the acquisition of approximately 95 hours of video, audio, and text data covering: object-feature-action data (e.g., perceptual features, namings, functions), Exploratory Acts (haptic manipulation for feature acquisition/verification), gestures and demonstrations for object/feature/action description, and reasoning patterns (e.g., justifications, analogies) for attributing a given characterization. The wealth and content of the data make this corpus a one-of-a-kind resource for the study and modeling of object affordances.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Kavouras, Marinos; Kokla, Margarita; Tomai, Eleni; Darra, Athanasia; Pastra, Katerina: GEOTHNK: A Semantic Approach to Spatial Thinking (Incollection). Progress in Cartography, pp. 319–338, Springer, 2016.
@incollection{kavouras2016geothnk,
title = {GEOTHNK: A Semantic Approach to Spatial Thinking},
author = {Marinos Kavouras and Margarita Kokla and Eleni Tomai and Athanasia Darra and Katerina Pastra},
year = {2016},
date = {2016-01-01},
booktitle = {Progress in Cartography},
pages = {319--338},
publisher = {Springer},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Zlatintsi, Athanasia; Koutras, Petros; Efthymiou, Niki; Maragos, Petros; Potamianos, Alexandros; Pastra, Katerina: Quality evaluation of computational models for movie summarization (Conference). 2015 Seventh International Workshop on Quality of Multimedia Experience (QoMEX), pp. 1–6, IEEE, 2015.
@conference{zlatintsi2015quality,
title = {Quality evaluation of computational models for movie summarization},
author = {Athanasia Zlatintsi and Petros Koutras and Niki Efthymiou and Petros Maragos and Alexandros Potamianos and Katerina Pastra},
year = {2015},
date = {2015-01-01},
booktitle = {2015 Seventh International Workshop on Quality of Multimedia Experience (QoMEX)},
pages = {1--6},
organization = {IEEE},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Vatakis, Argiro; Pastra, Katerina; Dimitrakis, P.: Acquisition of object knowledge through Exploratory Acts (Conference). 15th International Multisensory Research Forum (IMRF), Amsterdam, NL, 2014.
@conference{vatakis2014acquisition,
title = {Acquisition of object knowledge through Exploratory Acts},
author = {Argiro Vatakis and Katerina Pastra and P. Dimitrakis},
year = {2014},
date = {2014-06-01},
address = {Amsterdam, NL},
organization = {15th International Multisensory Research Forum (IMRF)},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}