@techreport{Sebernegg2020,
  title       = "Motion Similarity Modeling - A State of the Art Report",
  author      = "Anna Sebernegg and Peter K\'{a}n and Hannes Kaufmann",
  year        = "2020",
  month       = aug,
  number      = "TR-193-02-2020-5",
  address     = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria",
  institution = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien",
  note        = "human contact: technical-report@cg.tuwien.ac.at",
  URL         = "https://www.cg.tuwien.ac.at/research/publications/2020/Sebernegg2020/",
}

@article{Mirzaei_Mohammadreza_2020-EVR,
  title    = "EarVR: Using Ear Haptics in Virtual Reality for Deaf and Hard-of-Hearing People",
  author   = "Mohammadreza Mirzaei and Peter K\'{a}n and Hannes Kaufmann",
  year     = "2020",
  abstract = "Virtual Reality (VR) has great potential to improve the skills of Deaf and Hard-of-Hearing (DHH) people. Most VR applications and devices are designed for persons without hearing problems, so DHH persons face many limitations when using VR. Adding special features to a VR environment, such as subtitles or haptic devices, will help them. Previously, it was necessary to design a special VR environment for DHH persons. We introduce and evaluate a new prototype called ``EarVR'' that can be mounted on any desktop or mobile VR Head-Mounted Display (HMD). EarVR analyzes 3D sounds in a VR environment and locates the direction of the sound source closest to the user. It notifies the user of the sound direction using two vibro-motors placed on the user's ears. EarVR helps DHH persons complete sound-based VR tasks in any VR application with 3D audio and a mute option for background music. Therefore, DHH persons can use all VR applications with 3D audio, not only those designed for them. Our user study shows that DHH participants completed a simple VR task significantly faster with EarVR than without, with completion times very close to those of participants without hearing problems. It also shows that DHH participants were able to finish a complex VR task with EarVR, whereas without it they could not finish the task even once. Finally, our qualitative and quantitative evaluation among DHH participants indicates that they preferred using EarVR and that it encouraged them to use VR technology more.",
  month    = may,
  journal  = "IEEE Transactions on Visualization and Computer Graphics",
  volume   = "26",
  number   = "05",
  doi      = "10.1109/TVCG.2020.2973441",
  pages    = "2084--2093",
  keywords = "Handicapped Aids, Haptic Interfaces, Helmet Mounted Displays, Virtual Reality, 3D Sounds, 3D Audio, Deaf and Hard-of-Hearing People, Head-Mounted Display, VR Application, EarVR, VR Technology, Haptic Devices, DHH Persons, Hearing Problems, VR Apps",
  URL      = "https://www.cg.tuwien.ac.at/research/publications/2020/Mirzaei_Mohammadreza_2020-EVR/",
}

@inproceedings{adolf-2019-jug,
  title     = "Juggling in VR: Advantages of Immersive Virtual Reality in Juggling Learning",
  author    = "Jind\v{r}ich Adolf and Peter K\'{a}n and Benjamin Outram and Hannes Kaufmann and Jarom\'{i}r Dole\v{z}al and Lenka Lhotsk\'{a}",
  year      = "2019",
  month     = nov,
  publisher = "ACM",
  event     = "25th ACM Symposium on Virtual Reality Software and Technology",
  booktitle = "25th ACM Symposium on Virtual Reality Software and Technology",
  pages     = "1--5",
  URL       = "https://www.cg.tuwien.ac.at/research/publications/2019/adolf-2019-jug/",
}

@article{kan-2019-dli,
  title    = "DeepLight: Light Source Estimation for Augmented Reality using Deep Learning",
  author   = "Peter K\'{a}n and Hannes Kaufmann",
  year     = "2019",
  abstract = "This paper presents a novel method for illumination estimation from RGB-D images. The main focus of the proposed method is to enhance visual coherence in augmented reality applications by providing accurate and temporally coherent estimates of real illumination. For this purpose, we designed and trained a deep neural network which calculates a dominant light direction from a single RGB-D image. Additionally, we propose a novel method for real-time outlier detection to achieve temporally coherent estimates. Our method for light source estimation in augmented reality was evaluated on a set of real scenes. Our results demonstrate that the neural network can successfully estimate light sources even in scenes which were not seen by the network during training. Moreover, we compared our results with illumination estimates calculated by a state-of-the-art method for illumination estimation. Finally, we demonstrate the applicability of our method on numerous augmented reality scenes.",
  month    = jun,
  journal  = "The Visual Computer",
  volume   = "35",
  number   = "6",
  doi      = "10.1007/s00371-019-01666-x",
  pages    = "873--883",
  keywords = "Light source estimation, Augmented reality, Photometric registration, Deep learning",
  URL      = "https://www.cg.tuwien.ac.at/research/publications/2019/kan-2019-dli/",
}