@studentproject{eschner-blur-2022, title = "Generating Molecular Motion Blur Videos for a User Study", author = "Johannes Eschner", year = "2022", month = oct, URL = "https://www.cg.tuwien.ac.at/research/publications/2022/eschner-blur-2022/", } @bachelorsthesis{kristmann-2022-occ, title = "Occluder Frequency Analysis for Evaluating the Level of Visibility of Partly Occluded Objects", author = "Elias Kristmann", year = "2022", abstract = "To increase the rendering efficiency of large and complex scenes, occlusion culling algorithms detect objects which are completely hidden by others and therefore do not need to be rendered. However, these methods often follow an all-or-nothing principle, either culling the geometry entirely or drawing it at full detail. This approach disregards an important subcategory of the visibility problem: detecting objects that are hardly visible because they are partly occluded and which can therefore be rendered at a lower level of detail without generating noticeable artifacts. In this thesis we assess the level of visibility of such objects by computing a hierarchical occlusion map and analysing its structure based on the frequencies of the occluders. This analysis results in a parameter that controls the level of detail (LOD) in which the geometry is rendered. The algorithm performs well even in scenes with sparse occlusion, surpassing the standard hierarchical occlusion map algorithm, while still leaving considerable potential for further improvement.", month = jul, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien", keywords = "rendering, occlusion culling, real-time", URL = "https://www.cg.tuwien.ac.at/research/publications/2022/kristmann-2022-occ/", } @article{cardoso-2022-rtpercept, title = "Training and Predicting Visual Error for Real-Time Applications", author = "Joao Afonso Cardoso and Bernhard Kerbl and Lei Yang and Yury Uralsky and Michael Wimmer", year = "2022", abstract = "Visual error metrics play a fundamental role in the quantification of perceived image similarity. Most recently, use cases for them in real-time applications have emerged, such as content-adaptive shading and shading reuse to increase performance and improve efficiency. A wide range of different metrics has been established, with the most sophisticated being capable of capturing the perceptual characteristics of the human visual system. However, their complexity, computational expense, and reliance on reference images to compare against prevent their generalized use in real-time, restricting such applications to using only the simplest available metrics. In this work, we explore the abilities of convolutional neural networks to predict a variety of visual metrics without requiring either reference or rendered images. Specifically, we train and deploy a neural network to estimate the visual error resulting from reusing shading or using reduced shading rates. The resulting models account for 70%--90% of the variance while achieving up to an order of magnitude faster computation times. Our solution combines image-space information that is readily available in most state-of-the-art deferred shading pipelines with reprojection from previous frames to enable an adequate estimate of visual errors, even in previously unseen regions. We describe a suitable convolutional network architecture and considerations for data preparation for training.
We demonstrate the capability of our network to predict complex error metrics at interactive rates in a real-time application that implements content-adaptive shading in a deferred pipeline. Depending on the portion of unseen image regions, our approach can achieve up to 2x performance compared to state-of-the-art methods.", month = may, journal = "Proceedings of the ACM on Computer Graphics and Interactive Techniques", volume = "5", number = "1", issn = "2577-6193", doi = "10.1145/3522625", publisher = "Association for Computing Machinery", pages = "1--17", keywords = "perceptual error, variable rate shading, real-time", URL = "https://www.cg.tuwien.ac.at/research/publications/2022/cardoso-2022-rtpercept/", } @inproceedings{stappen_SteFAS, title = "Temporally Stable Content-Adaptive and Spatio-Temporal Shading Rate Assignment for Real-Time Applications", author = "Stefan Stappen and Johannes Unterguggenberger and Bernhard Kerbl and Michael Wimmer", year = "2021", abstract = "We propose two novel methods to improve the efficiency and quality of real-time rendering applications: Texel differential-based content-adaptive shading (TDCAS) and spatio-temporally filtered adaptive shading (STeFAS). Utilizing Variable Rate Shading (VRS), a hardware feature introduced with NVIDIA's Turing micro-architecture, and properties derived during rendering or Temporal Anti-Aliasing (TAA), our techniques adapt the resolution to improve the performance and quality of real-time applications. VRS enables different shading resolutions for different regions of the screen during a single render pass. In contrast to other techniques, TDCAS and STeFAS have very little overhead for computing the shading rate. STeFAS enables up to 4x higher rendering resolutions at similar frame rates, or a 4x performance increase at the same resolution.", month = oct, isbn = "978-3-03868-162-5", publisher = "Eurographics Association", organization = "The Eurographics Association", location = "online", event = "Pacific Graphics 2021", editor = "Lee, Sung-Hee and Zollmann, Stefanie and Okabe, Makoto and W\"{u}nsche, Burkhard", doi = "10.2312/pg.20211391", booktitle = "Pacific Graphics Short Papers, Posters, and Work-in-Progress Papers", pages = "65--66", keywords = "variable rate shading, temporal antialiasing", URL = "https://www.cg.tuwien.ac.at/research/publications/2021/stappen_SteFAS/", } @inproceedings{grossmann-2021-layout, title = "Does the Layout Really Matter? A Study on Visual Model Accuracy Estimation", author = "Nicolas Grossmann and J\"{u}rgen Bernard and Michael Sedlmair and Manuela Waldner", year = "2021", abstract = "In visual interactive labeling, users iteratively assign labels to data items until the machine model reaches an acceptable accuracy. A crucial step of this process is to inspect the model's accuracy and decide whether it is necessary to label additional elements. In scenarios with no or very little labeled data, visual inspection of the predictions is required. Similarity-preserving scatterplots created through a dimensionality reduction algorithm are a common visualization that is used in these cases. Previous studies investigated the effects of layout and image complexity on tasks like labeling. However, model evaluation has not been studied systematically. We present the results of an experiment studying the influence of image complexity and visual grouping of images on model accuracy estimation.
We found that users outperform traditional automated approaches when estimating a model's accuracy. Furthermore, while the complexity of images impacts the overall performance, the layout of the items in the plot has little to no effect on estimations.", month = oct, publisher = "IEEE Computer Society Press", event = "IEEE Visualization Conference (VIS)", doi = "10.1109/VIS49827.2021.9623326", booktitle = "IEEE Visualization Conference (VIS)", pages = "61--65", URL = "https://www.cg.tuwien.ac.at/research/publications/2021/grossmann-2021-layout/", } @inproceedings{panfili-2021-myop, title = "Myopia in Head-Worn Virtual Reality", author = "Lara Panfili and Michael Wimmer and Katharina Kr\"{o}sl", year = "2021", abstract = "In this work, we investigate the influence of myopia on the perceived visual acuity (VA) in head-worn virtual reality (VR). Factors such as display resolution or vision capabilities of users influence the VA in VR. We simulated eyesight tests in VR and on a desktop screen and conducted a user study comparing VA measurements of participants with normal sight and participants with myopia. Surprisingly, our results suggest that people with severe myopia can see better in VR than in the real world, while the VA of people with normal or corrected sight or mild myopia is reduced in VR.", month = mar, booktitle = "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", doi = "10.1109/VRW52623.2021.00197", isbn = "978-1-6654-1166-0", location = "online", publisher = "IEEE Computer Society Press", pages = "629--630", keywords = "visual impairments", URL = "https://www.cg.tuwien.ac.at/research/publications/2021/panfili-2021-myop/", } @article{luidolt-2020-lightperceptionVR, title = "Gaze-Dependent Simulation of Light Perception in Virtual Reality", author = "Laura R. Luidolt and Michael Wimmer and Katharina Kr\"{o}sl", year = "2020", abstract = "The perception of light is inherently different inside a virtual reality (VR) or augmented reality (AR) simulation when compared to the real world. Conventional head-worn displays (HWDs) are not able to display the same high dynamic range of brightness and color as the human eye can perceive in the real world. To mimic the perception of real-world scenes in virtual scenes, it is crucial to reproduce the effects of incident light on the human visual system. In order to advance virtual simulations towards perceptual realism, we present an eye-tracked VR/AR simulation comprising effects for gaze-dependent temporal eye adaption, perceptual glare, visual acuity reduction, and scotopic color vision. Our simulation is based on medical expert knowledge and medical studies of the healthy human eye. We conducted the first user study comparing the perception of light in a real-world low-light scene to a VR simulation.
Our results show that the proposed combination of simulated visual effects is well received by users and also indicate that an individual adaptation is necessary, because perception of light is highly subjective.", month = dec, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "26", number = "12", issn = "1077-2626", doi = "10.1109/TVCG.2020.3023604", pages = "3557--3567", keywords = "perception, virtual reality, user studies", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/luidolt-2020-lightperceptionVR/", } @inproceedings{schindler_2020vis, title = "The Anatomical Edutainer", author = "Marwin Schindler and Hsiang-Yun Wu and Renata Raidou", year = "2020", abstract = "Physical visualizations (i.e., data representations by means of physical objects) have been used for many centuries in medical and anatomical education. Recently, 3D printing techniques have also started to emerge. Still, other medical physicalizations that rely on affordable and easy-to-find materials are limited, while smart strategies that take advantage of the optical properties of our physical world have not been thoroughly investigated. We propose the Anatomical Edutainer, a workflow to guide the easy, accessible, and affordable generation of physicalizations for tangible, interactive anatomical edutainment. The Anatomical Edutainer supports 2D printable and 3D foldable physicalizations that change their visual properties (i.e., hues of the visible spectrum) under colored lenses or colored lights, to reveal distinct anatomical structures through user interaction.", month = oct, event = "IEEE Vis 2020", booktitle = "IEEE Vis Short Papers 2020", pages = "1--5", keywords = "Data Physicalization, Medical Visualization, Anatomical Education", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/schindler_2020vis/", } @inproceedings{kroesl-2020-XREye, title = "XREye: Simulating Visual Impairments in Eye-Tracked XR", author = "Katharina Kr\"{o}sl and Carmine Elvezio and Matthias H\"{u}rbe and Sonja Karst and Steven Feiner and Michael Wimmer", year = "2020", abstract = "Many people suffer from visual impairments, which can be difficult for patients to describe and others to visualize. To aid in understanding what people with visual impairments experience, we demonstrate a set of medically informed simulations in eye-tracked XR of several common conditions that affect visual perception: refractive errors (myopia, hyperopia, and presbyopia), cornea disease, and age-related macular degeneration (wet and dry).", month = mar, booktitle = "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", location = "(Atlanta) online", publisher = "IEEE", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/kroesl-2020-XREye/", } @mastersthesis{Luidolt-2020-DA, title = "Perception of Light in Virtual Reality", author = "Laura R. Luidolt", year = "2020", abstract = "The perception of light and light incidence in the human eye is substantially different in real-world scenarios and virtual reality (VR) simulations. Standard low dynamic range displays, as used in common VR headsets, are not able to replicate the same light intensities we see in reality. Therefore, light phenomena such as temporal eye adaptation, perceptual glare, visual acuity reduction and scotopic color vision need to be simulated to generate realistic images.
Even though a physically based simulation of these effects could increase the perceived realism of VR applications, this topic has not yet been thoroughly researched. We propose a post-processing workflow for VR and augmented reality (AR), using eye tracking, that is based on medical studies of the healthy human eye and is able to run in real time, to simulate light effects as close to reality as possible. We improve an existing temporal eye adaptation algorithm to be view-dependent. We adapt a medically based glare simulation to run in VR and AR. Additionally, we add eye tracking to adjust the glare intensity according to the viewing direction and the glare appearance depending on the user’s pupil size. We propose a new function fit for the reduction of visual acuity in VR head-mounted displays. Finally, we include scotopic color vision for more realistic rendering of low-light scenes. We conducted a primarily qualitative pilot study, comparing a real-world low-light scene to our VR simulation through individual, perceptual evaluation. Most participants mentioned that the simulation of temporal eye adaptation, visual acuity reduction and scotopic color vision was similar or identical to their own perception in the real world. However, further work is necessary to improve the appearance and movement of our proposed glare kernel. We conclude that our work has laid the groundwork for further research regarding the simulation of and individual adaptation to the perception of light in VR.", month = feb, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien", keywords = "perception, temporal eye adaptation, glare, virtual reality, scotopic vision, visual acuity reduction, augmented reality", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/Luidolt-2020-DA/", } @bachelorsthesis{panfili-2019-VAVR, title = "Effects of VR-Displays on Visual Acuity", author = "Lara Panfili", year = "2019", abstract = "The perceived visual acuity (VA) of people in virtual reality (VR), using a head-mounted display (HMD), is not equal to their VA in the real world. The reason for this difference is the reduction of visual acuity in the virtual environment that is caused by various factors, such as the low resolution of the VR display. Based on those circumstances, the capacity of an individual to distinguish small details diminishes visibly. Previous studies regarding eyesight in VR have already verified that the best visual resolution in virtual environments is always lower than natural vision, and this aspect can therefore be seen as a mild vision impairment for the users of an HMD. The goal of this project is to investigate how much the VA is reduced in VR and whether the decrease of VA in VR is perceived similarly by everyone or whether visual impairments like myopia influence visual perception. Based on a previous project, two different tests were implemented with the game engine Unreal Engine 4: a VR version, for which an HTC VIVE headset was used, and a desktop version. These tests were used to investigate the VA of the participants in a user study, and the results were compared to each other in order to find the extent to which visual impairments have an impact on VA.
", month = nov, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien ", keywords = "virtual reality, visual acuity", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/panfili-2019-VAVR/", } @article{waldner-2019-rld, title = "A Comparison of Radial and Linear Charts for Visualizing Daily Patterns", author = "Manuela Waldner and Alexandra Diehl and Denis Gracanin and Rainer Splechtna and Claudio Delrieux and Kresimir Matkovic", year = "2019", abstract = "Radial charts are generally considered less effective than linear charts. Perhaps the only exception is in visualizing periodical time-dependent data, which is believed to be naturally supported by the radial layout. It has been demonstrated that the drawbacks of radial charts outweigh the benefits of this natural mapping. Visualization of daily patterns, as a special case, has not been systematically evaluated using radial charts. In contrast to yearly or weekly recurrent trends, the analysis of daily patterns on a radial chart may benefit from our trained skill on reading radial clocks that are ubiquitous in our culture. In a crowd-sourced experiment with 92 non-expert users, we evaluated the accuracy, efficiency, and subjective ratings of radial and linear charts for visualizing daily traffic accident patterns. We systematically compared juxtaposed 12-hours variants and single 24-hours variants for both layouts in four low-level tasks and one high-level interpretation task. Our results show that over all tasks, the most elementary 24-hours linear bar chart is most accurate and efficient and is also preferred by the users. This provides strong evidence for the use of linear layouts – even for visualizing periodical daily patterns.", month = oct, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "26", doi = "10.1109/TVCG.2019.2934784", pages = "1033--1042", keywords = "radial charts, time series data, daily patterns, crowd-sourced experiment", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/waldner-2019-rld/", } @studentproject{koch-2019-PR, title = "Simulation of Diabetic Macular Edema in Virtual Reality", author = "Thomas Bernhard Koch", year = "2019", abstract = "Simulation of diabetic macular edema (DME) is implemented in a virtual reality simulation using Unreal Engine 4. Common symptoms of DME are blurry vision, loss of contrast, floaters and distorted vision. We use different computer graphics techniques to create effects which resemble such symptoms. An eye tracker from Pupil Labs is used in order to make effects gaze dependent. The implementation of these effects is discussed and adjustable parameters of the effects are explained.", month = aug, note = "1", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/koch-2019-PR/", } @inproceedings{kroesl-2019-ThesisFF, title = "Simulating Vision Impairments in VR and AR", author = "Katharina Kr\"{o}sl", year = "2019", abstract = "1.3 billion people worldwide are affected by vision impairments, according to the World Health Organization. However, vision impairments are hardly ever taken into account when we design our cities, buildings, emergency signposting, or lighting systems. With this research, we want to develop realistic, medically based simulations of eye diseases in VR and AR, which allow calibrating vision impairments to the same level for different users. 
This allows us to conduct user studies with participants with normal sight and graphically simulated vision impairments, to determine the effects of these impairments on perception, and to investigate lighting concepts under impaired vision conditions. This thesis will, for the first time, provide methods for architects and designers to evaluate their designs for accessibility and to develop lighting systems that can enhance the perception of people with vision impairments.", month = jun, booktitle = "ACM SIGGRAPH THESIS FAST FORWARD 2019", keywords = "vision impairments, cataracts, virtual reality, augmented reality, user study", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/kroesl-2019-ThesisFF/", } @mastersthesis{plank-2017-sldg, title = "Effective Line Drawing Generation", author = "Pascal Plank", year = "2019", abstract = "Advanced rendering algorithms such as suggestive contours are able to depict objects in the style of line drawings with various levels of detail. The selection of an appropriate level of detail is usually based on visual aesthetics rather than on substantial characteristics like the accuracy of 3D shape perception. The aim of this thesis is to develop a novel approach for effectively generating line drawings in the style of suggestive contours that are optimized for human 3D shape perception while keeping the amount of ink to a minimum. The proposed post-processing meta-heuristic for optimizing line drawings uses empirical thresholds based on probing human shape perception. The heuristic can also be used to optimize line drawings in terms of other visual characteristics, e.g., cognitive load, and for other line-drawing styles such as ridges and valleys. The optimization routine is based on a perceptual user study using the gauge-figure task to collect more than 17,000 high-quality user estimates of surface normals from suggestive contours renderings. By analyzing these data points, a more in-depth understanding of how humans perceive 3D shape from line drawings is gained. In particular, the accuracy of 3D shape perception and shape ambiguity with regard to changes in the level of detail and the type of object presented are investigated. In addition, the collected data points are used to calculate two pixel-based perceptual characteristics: the optimal size of a local neighborhood area to estimate 3D shape from and the optimal local ink percentage in this area. In the analysis, a neighborhood size of 36 pixels with an optimal ink percentage of 17.3% could be identified. These thresholds are used to optimize suggestive contours renderings in a post-processing stage using a greedy nearest neighbor optimization scheme. The proposed meta-heuristic procedure yields visually convincing results where each pixel value is close to the identified thresholds. In terms of practical application, the optimization scheme can be used in areas where high 3D shape understanding is essential, such as furniture manuals or architectural renderings.
Both the empirical results regarding shape understanding and the practical applications of the thesis’s results form the basis for optimizing other line-drawing methods and for better understanding how humans perceive shape from lines.", month = may, pages = "84", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien", keywords = "visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/plank-2017-sldg/", } @inproceedings{Vasylevska_Khrystyna-2019-TEFVR, title = "Towards Eye-Friendly VR: How Bright Should It Be?", author = "Khrystyna Vasylevska and Hyunjin Yoo and Tara Akhavan and Hannes Kaufmann", year = "2019", abstract = "Visual information plays an important part in the perception of the world around us. Recently, head-mounted displays (HMDs) came to the consumer market and became a part of the everyday life of thousands of people. As with desktop screens and hand-held devices before, the public is concerned about the possible health consequences of prolonged usage and questions the adequacy of the default settings. It has been shown that the brightness and contrast of a display should be adjusted to match the external light to decrease eye strain and other symptoms. Currently, there is a noticeable mismatch in brightness between the screen and the dark background of an HMD that might cause eye strain, insomnia, and other unpleasant symptoms. In this paper, we explore the possibility of significantly lowering the screen brightness in the HMD while successfully compensating for the loss of visual information on the dimmed screen. We designed a user study to explore the connection between HMD screen brightness and task performance, cybersickness, users’ comfort, and preferences. We tested three levels of brightness: the default Full Brightness, the optional Night Mode, and a significantly lower brightness with original content and compensated content.
Our results suggest that although users still prefer the brighter setting, HMDs can be successfully used with significantly lower screen brightness, especially if the low screen brightness is compensated.", month = mar, publisher = "IEEE", location = "Osaka, Japan", issn = "2642-5246", event = "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", doi = "10.1109/VR.2019.8797752", booktitle = "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", pages = "1--9", keywords = "Virtual Reality, User Study, Perception, Head-Mounted Display", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/Vasylevska_Khrystyna-2019-TEFVR/", } @mastersthesis{schuller_reichl-2019-avt, title = "Mapping of Realism in Rendering onto Perception of Presence in Augmented Reality", author = "David Sch\"{u}ller-Reichl", year = "2019", month = mar, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/schuller_reichl-2019-avt/", } @inproceedings{kroesl-2019-ICthroughVR, title = "ICthroughVR: Illuminating Cataracts through Virtual Reality", author = "Katharina Kr\"{o}sl and Carmine Elvezio and Matthias H\"{u}rbe and Sonja Karst and Michael Wimmer and Steven Feiner", year = "2019", abstract = "Vision impairments, such as cataracts, affect how many people interact with their environment, yet are rarely considered by architects and lighting designers because of a lack of design tools. To address this, we present a method to simulate vision impairments caused by cataracts in virtual reality (VR), using eye tracking for gaze-dependent effects. We conducted a user study to investigate how lighting affects visual perception for users with cataracts. Unlike past approaches, we account for the user's vision and some constraints of VR headsets, allowing for calibration of our simulation to the same level of degraded vision for all participants.", month = mar, publisher = "IEEE", location = "Osaka, Japan", event = "IEEE VR 2019, the 26th IEEE Conference on Virtual Reality and 3D User Interfaces", doi = "10.1109/VR.2019.8798239", booktitle = "2019 IEEE Conference on Virtual Reality and 3D User Interfaces", pages = "655--663", keywords = "vision impairments, cataracts, virtual reality, user study", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/kroesl-2019-ICthroughVR/", } @mastersthesis{trautner-2018-imd, title = "Importance-Driven Exploration of Molecular Dynamics Simulations", author = "Thomas Trautner", year = "2018", abstract = "The aim of this thesis is a novel real-time visualization approach for exploring molecular dynamics (MD-)simulations. Through constantly improving hardware and ever-increasing computing power, MD-simulations are becoming more easily available. Additionally, they consist of hundreds, thousands or even millions of individual simulation frames and are getting more and more detailed. The calculation of such simulations is no longer limited by algorithms or hardware; nevertheless, it is still not possible to efficiently explore this huge amount of simulation data, as an animated 3D visualization, with ordinary and well-established visualization tools. Using current software tools, the exploration of such long simulations takes too much time, and due to the complexity of large molecular scenes, the visualizations suffer heavily from visual clutter.
It is therefore very likely that the user will miss important events. To address this, we designed a focus & context approach for MD-simulations that guides the user to the most relevant temporal and spatial events, so that it is no longer necessary to explore the simulation in a linear fashion. Our contribution can be divided into the following four topics: 1. Spatial importance through different levels of detail. Depending on the type of research task, different geometrical representations can be selected for both focus and context elements. 2. Importance-driven visibility management through ghosting, to prevent context elements from occluding focus elements. 3. Temporal importance through adaptive fast-forward. The playback speed of the simulation is thereby dependent on a single importance function or a combination of multiple ones. 4. Visual decluttering of accumulated frames through motion blur, which additionally illustrates the playback speed-up. From the very beginning, this work was developed in close cooperation with biochemists from the Loschmidt Laboratories in Brno, Czech Republic. Together, we analyzed different use cases demonstrating the flexibility of our novel focus & context approach.", month = oct, pages = "100", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", keywords = "molecular dynamics simulation, realtime visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/trautner-2018-imd/", } @inproceedings{kroesl-2018-DC, title = "[DC] Computational Design of Smart Lighting Systems for Visually Impaired People, using VR and AR Simulations", author = "Katharina Kr\"{o}sl", year = "2018", abstract = "This Doctoral Consortium paper presents my dissertation research in a multidisciplinary setting, spanning from architecture, specifically lighting design and building information modeling, to virtual reality (VR) and perception. Since vision impairments are hardly taken into account in architecture and lighting design today, this research aims to provide the necessary tools to quantify the effects of vision impairments, so design guidelines regarding these impairments can be developed. Another research goal is the determination of the influence of different lighting conditions on the perception of people with vision impairments. This would allow us to develop smart lighting systems that can aid visually impaired people by increasing their visual perception of their environment. This paper also outlines the concept for a tool to automatically generate lighting solutions and compare and test them in VR, as a design aid for architects and lighting designers.", month = oct, publisher = "IEEE", location = "Munich", event = "ISMAR 2018", booktitle = "Proceedings of the 2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", keywords = "vision impairments, lighting design, virtual reality, user study", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/kroesl-2018-DC/", } @misc{kroesl-2018-TVS, title = "The Virtual Schoolyard: Attention Training in Virtual Reality for Children with Attentional Disorders", author = "Katharina Kr\"{o}sl and Anna Felnhofer and Johanna X. Kafka and Laura Schuster and Alexandra Rinnerthaler and Michael Wimmer and Oswald D. Kothgassner", year = "2018", abstract = "This work presents a virtual reality simulation for training different attentional abilities in children and adolescents.
In an interdisciplinary project between psychology and computer science, we developed four mini-games that are used during therapy sessions to address different aspects of attentional disorders. First experiments show that the immersive game-like application is well received by children. Our tool is also currently part of a treatment program in an ongoing clinical study.", month = aug, publisher = "ACM", location = "Vancouver, Canada", isbn = "978-1-4503-5817-0", event = "ACM SIGGRAPH 2018", doi = "10.1145/3230744.3230817", note = "Poster presented at ACM SIGGRAPH 2018 (2018-08-12--2018-08-16)", pages = "Article 27", keywords = "virtual reality, attentional disorders, user study", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/kroesl-2018-TVS/", } @article{HECHER-2017-HDY, title = "How Do Users Map Points Between Dissimilar Shapes?", author = "Michael Hecher and Paul Guerrero and Peter Wonka and Michael Wimmer", year = "2018", abstract = "Finding similar points in globally or locally similar shapes has been studied extensively through the use of various point descriptors or shape-matching methods. However, little work exists on finding similar points in dissimilar shapes. In this paper, we present the results of a study where users were given two dissimilar two-dimensional shapes and asked to map a given point in the first shape to the point in the second shape they consider most similar. We find that user mappings in this study correlate strongly with simple geometric relationships between points and shapes. To predict the probability distribution of user mappings between any pair of simple two-dimensional shapes, two distinct statistical models are defined using these relationships. We perform a thorough validation of the accuracy of these predictions and compare our models qualitatively and quantitatively to well-known shape-matching methods. Using our predictive models, we propose an approach to map objects or procedural content between different shapes in different design scenarios.", month = aug, doi = "10.1109/TVCG.2017.2730877", issn = "1077-2626", journal = "IEEE Transactions on Visualization and Computer Graphics", number = "8", volume = "24", pages = "2327--2338", keywords = "shape matching, transformations, shape similarity", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/HECHER-2017-HDY/", } @article{Kathi-2018-VRB, title = "A VR-based user study on the effects of vision impairments on recognition distances of escape-route signs in buildings", author = "Katharina Kr\"{o}sl and Dominik Bauer and Michael Schw\"{a}rzler and Henry Fuchs and Michael Wimmer and Georg Suter", year = "2018", abstract = "In workplaces or publicly accessible buildings, escape routes are signposted according to official norms or international standards that specify distances, angles and areas of interest for the positioning of escape-route signs. In homes for the elderly, in which the residents commonly have degraded mobility and suffer from vision impairments caused by age or eye diseases, the specifications of current norms and standards may be insufficient. Quantifying the effect of symptoms of vision impairments like reduced visual acuity on recognition distances is challenging, as it is cumbersome to find a large number of user study participants who suffer from exactly the same form of vision impairments.
Hence, we propose a new methodology for such user studies: By conducting a user study in virtual reality (VR), we are able to use participants with normal or corrected sight and simulate vision impairments graphically. The use of standardized medical eyesight tests in VR allows us to calibrate the visual acuity of all our participants to the same level, taking their respective visual acuity into account. Since we primarily focus on homes for the elderly, we accounted for their often limited mobility by implementing a wheelchair simulation for our VR application.", month = apr, journal = "The Visual Computer", volume = "34", number = "6-8", issn = "0178-2789", doi = "10.1007/s00371-018-1517-7", pages = "911--923", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/Kathi-2018-VRB/", } @article{polatsek-2018-stv, title = "Exploring visual attention and saliency modeling for task-based visual analysis", author = "Patrik Polatsek and Manuela Waldner and Ivan Viola and Peter Kapec and Wanda Benesova", year = "2018", abstract = "Memory, visual attention and perception play a critical role in the design of visualizations. The way users observe a visualization is affected by salient stimuli in a scene as well as by domain knowledge, interest, and the task. While recent saliency models manage to predict the users’ visual attention in visualizations during exploratory analysis, there is little evidence of how much influence bottom-up saliency has on task-based visual analysis. Therefore, we performed an eye-tracking study with 47 users to determine the users’ path of attention when solving three low-level analytical tasks using 30 different charts from the MASSVIS database [1]. We also compared our task-based eye tracking data to the data from the original memorability experiment by Borkin et al. [2]. We found that solving a task leads to more consistent viewing patterns compared to exploratory visual analysis. However, bottom-up saliency of a visualization has negligible influence on users’ fixations and task efficiency when performing a low-level analytical task. Also, the efficiency of visual search for an extreme target data point is barely influenced by the target’s bottom-up saliency. Therefore, we conclude that bottom-up saliency models tailored towards information visualization are not suitable for predicting visual attention when performing task-based visual analysis.
We discuss potential reasons and suggest extensions to visual attention models to better account for task-based visual analysis.", month = feb, doi = "10.1016/j.cag.2018.01.010", journal = "Computers & Graphics", number = "2", keywords = "Information visualization, Eye-tracking experiment, Saliency, Visual attention, Low-level analytical tasks", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/polatsek-2018-stv/", } @phdthesis{waldin-2017-thesis, title = "Using and Adapting to Limits of Human Perception in Visualization", author = "Nicholas Waldin", year = "2017", month = nov, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/waldin-2017-thesis/", } @mastersthesis{KREUZER-2017-PBF, title = "Using Perception-Based Filtering to Hide Shadow Artifacts", author = "Felix Kreuzer", year = "2017", abstract = "Shadows are an indispensable aid for understanding spatial relations of objects in natural scenes, which is why they are very important for real-time rendering applications. Combining filtering techniques with shadow mapping is a common tool to simulate visually-pleasing shadows in interactive applications. A positive effect of such approaches is that the filtering blurs aliasing artifacts caused by sampling the discretized geometric data stored in the shadow map, thereby improving the visual quality of the shadow. The goal of this thesis is to exploit common filtering algorithms in order to find a function of blur radius and shadow-map sampling frequency that allows for optimized computational performance while mostly preserving the visual quality of the shadow. In the course of this work, we investigate how shadow artifacts arise and how to hide them. We set up and execute a user study to find the optimal relation between the shadow-map sampling frequency and the filter radius. From the results of the user study, we derive a formula and develop an algorithm that can be incorporated into existing shadow-mapping algorithms. We evaluate our results by applying the algorithm to a custom-made rendering framework and observe an increase in processing speeds.", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", keywords = "soft shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/KREUZER-2017-PBF/", } @article{mindek-2017-dsn, title = "Data-Sensitive Visual Navigation", author = "Peter Mindek and Gabriel Mistelbauer and Eduard Gr\"{o}ller and Stefan Bruckner", year = "2017", abstract = "In visualization systems it is often the case that the changes of the input parameters are not proportional to the visual change of the generated output. In this paper, we propose a model for enabling data-sensitive navigation for user-interface elements. This model is applied to normalize the user input according to the visual change, and also to visually communicate this normalization. In this way, the exploration of heterogeneous data using common interaction elements can be performed in an efficient way. We apply our model to the field of medical visualization and present guided navigation tools for traversing vascular structures and for camera rotation around 3D volumes.
The presented examples demonstrate that the model scales to user-interface elements where multiple parameters are set simultaneously.", month = oct, journal = "Computers & Graphics", volume = "67", number = "C", pages = "77--85", keywords = "navigation, exploration, medical visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/mindek-2017-dsn/", } @mastersthesis{ERLER-2017-HVR, title = "Haptic Feedback in Room-Scale VR", author = "Philipp Erler", year = "2017", abstract = "Virtual reality (VR) is now becoming a mainstream medium. Current systems like the HTC Vive offer accurate tracking of the HMD and controllers, which allows for highly immersive interactions with the virtual environment. The interactions can be further enhanced by adding feedback. As an example, a controller can vibrate when it is close to a grabbable ball. As such interactions are not exhaustively researched, we conducted a user study. Specifically, we examine: - grabbing and throwing with controllers in a simple basketball game. - the influence of haptic and optical feedback on performance, presence, task load, and usability. - the advantages of VR over desktop for point-cloud editing. Several new techniques emerged from the point-cloud editor for VR. The bi-manual pinch gesture, which extends the handlebar metaphor, is a novel viewing method used to translate, rotate, and scale the point cloud. Our new rendering technique uses the geometry shader to draw sparse point clouds quickly. The selection volumes at the controllers are our new technique to efficiently select points in point clouds. The resulting selection is visualized in real time. The results of the user study show that: - grabbing with a controller button is intuitive but throwing is not. Releasing a button is a bad metaphor for releasing a grabbed virtual object in order to throw it. - any feedback is better than none. Adding haptic, optical, or both feedback types to the grabbing improves the user performance and presence. However, only sub-scores like accuracy and predictability are significantly improved. Usability and task load are mostly unaffected by feedback. - the point-cloud editing is significantly better in VR with the bi-manual pinch gesture and selection volumes than on the desktop with the orbiting camera and lasso selections.", month = jul, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", keywords = "virtual reality, room-scale VR, throwing, grabbing, physics, basketball, haptic feedback, optical feedback, controllers, point cloud, point-cloud editing, presence, performance, usability, task load", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/ERLER-2017-HVR/", } @inproceedings{waldner-2017-vph, title = "Exploring Visual Prominence of Multi-Channel Highlighting in Visualizations", author = "Manuela Waldner and Alexey Karimov and Eduard Gr\"{o}ller", year = "2017", abstract = "Visualizations make rich use of multiple visual channels so that there are few resources left to make selected focus elements visually distinct from their surrounding context. A large variety of highlighting techniques for visualizations has been presented in the past, but there has been little systematic evaluation of the design space of highlighting.
We explore highlighting from the perspective of visual marks and channels – the basic building blocks of visualizations that are directly controlled by visualization designers. We present the results from two experiments, exploring the visual prominence of highlighted marks in scatterplots: First, using luminance as a single highlight channel, we found that visual prominence is mainly determined by the luminance difference between the focus mark and the brightest context mark. The brightness differences between context marks and the overall brightness level have negligible influence. Second, multi-channel highlighting using luminance and blur leads to a good trade-off between highlight effectiveness and aesthetics. From the results, we derive a simple highlight model to balance highlighting across multiple visual channels and focus and context marks, respectively.", month = may, booktitle = "Spring Conference on Computer Graphics 2017", keywords = "information visualization, highlighting, focus+context, visual prominence", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/waldner-2017-vph/", } @article{Waldin_Nicholas_2017_FlickerObserver, title = "Flicker Observer Effect: Guiding Attention Through High Frequency Flicker in Images", author = "Nicholas Waldin and Manuela Waldner and Ivan Viola", year = "2017", abstract = "Drawing the user's gaze to an important item in an image or a graphical user interface is a common challenge. Usually, some form of highlighting is used, such as a clearly distinct color or a border around the item. Flicker can also be very salient, but is often perceived as annoying. In this paper, we explore high frequency flicker (60 to 72 Hz) to guide the user's attention in an image. At such high frequencies, the critical flicker frequency (CFF) threshold is reached, which makes the flicker appear to fuse into a stable signal. However, the CFF is not uniform across the visual field, but is higher in the peripheral vision at normal lighting conditions. Through experiments, we show that high frequency flicker can be easily detected by observers in the peripheral vision, but the signal is hardly visible in the foveal vision when users directly look at the flickering patch. We demonstrate that this property can be used to draw the user's attention to important image regions using a standard high refresh-rate computer monitor with minimal visible modifications to the image. In an uncalibrated visual search task, users could easily spot the specified search targets flickering at a very high frequency in a crowded image. They also reported that high frequency flicker was distracting when they had to attend to another region, while it was hardly noticeable when looking at the flickering region itself.", month = may, journal = "Computer Graphics Forum", volume = "36", number = "2", pages = "467--476", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/Waldin_Nicholas_2017_FlickerObserver/", } @bachelorsthesis{Koszticsak-2017-ewt, title = "Generating Expressive Window Thumbnails through Seam Carving", author = "Rebeka Koszticsak", year = "2017", abstract = "Thumbnails are used to display lists of open windows or tabs when switching between them on computers and on mobile devices. These images make it easier to recognize the opened applications, and help to find the needed window more quickly.
However, thumbnails only display a screenshot of the windows, so they can get confusing if many windows are open or if the same application is opened multiple times. Depending on the resolution of the display, the screenshot size decreases as the number of opened windows increases. Furthermore, within the same application (like MS Office Word) the screenshots are similar in appearance (e.g., white paper and toolbar), but the important text is not readable. There are several approaches that filter the important areas of the images to enhance the main region. In this bachelor thesis, an application is implemented that uses the above methods on screenshots. Screenshots of windows are reduced by cropping the irrelevant elements of the margin area using seam carving, i.e., by eliminating non-important pixel paths, and by common down-sampling. As a result, the thumbnails show only relevant information, which makes them more expressive and better able to fulfill their purpose.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/Koszticsak-2017-ewt/", } @article{Groeller_2016_P7, title = "Depth functions as a quality measure and for steering multidimensional projections", author = "Douglas Cedrim and Viktor Vad and Afonso Paiva and Eduard Gr\"{o}ller and Luis Gustavo Nonato and Antonio Castelo", year = "2016", abstract = "The analysis of multidimensional data has been a topic of continuous research for many years. This type of data can be found in several different areas of science. A common task while analyzing such data is to investigate patterns by interacting with spatializations of the data in a visual domain. Understanding the relation between the underlying dataset characteristics and the technique used to provide its visual representation is of fundamental importance since it can provide a better intuition on what to expect from the spatialization. In this paper, we propose the usage of concepts from non-parametric statistics, namely depth functions, as a quality measure for spatializations. We evaluate the action of multidimensional projection techniques on such estimates. We apply both qualitative and quantitative analyses on four different multidimensional techniques selected according to the properties they aim to preserve. We evaluate them with datasets of different characteristics: synthetic, real-world, high-dimensional, and contaminated with outliers. As a straightforward application, we propose to use depth information to guide multidimensional projection techniques which rely on interaction through control point selection and positioning. Even for techniques which do not intend to preserve any centrality measure, interesting results can be achieved by separating regions possibly contaminated with outliers.
", month = nov, journal = "Computers & Graphics (Special Section on SIBGRAPI 2016)", volume = "60", issn = "doi: 10.1016/j.cag.2016.08.008", pages = "93--106", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Groeller_2016_P7/", } @inproceedings{Reichinger-2016-spaghetti, title = "Spaghetti, Sink and Sarcophagus: Design Explorations of Tactile Artworks for Visually Impaired People", author = "Andreas Reichinger and Werner Purgathofer", year = "2016", month = oct, event = "9th Nordic Conference on CHI 2016", booktitle = "Proceedings of the 9th Nordic Conference on CHI 2016", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Reichinger-2016-spaghetti/", } @article{bernhard-2016-gft, title = " The Accuracy of Gauge-Figure Tasks in Monoscopic and Stereo Displays", author = "Matthias Bernhard and Manuela Waldner and Pascal Plank and Veronika Solteszova and Ivan Viola", year = "2016", abstract = "The gauge-figure task (GFT) is a widespread method used to study surface perception for evaluating rendering and visualization techniques. The authors investigate how accurately slant angles probed on well-defined objects align with the ground truth (GT) in monoscopic and stereoscopic displays. Their results show that the GFT probes taken with well-defined objects align well with the GT in the all-monoscopic and all-stereoscopic conditions. However, they found that a GF rendered in stereo over a monoscopic stimulus results in a strong slant underestimation and that an overestimation occurred in the inverse case (monoscopic GF andstereoscopic stimulus). They discuss how their findings affect the interpretation of absolute GFT measures, compared to the GT normal.", month = jul, journal = "IEEE Computer Graphics and Applications", number = "4", volume = "36", pages = "56--66", keywords = "computer graphics, gauge-figure task, perceptual visualization, shape perception", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/bernhard-2016-gft/", } @habilthesis{viola-evr, title = "Effective Visual Representations", author = "Ivan Viola", year = "2016", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/viola-evr/", } @article{Waldin_Nicholas_2016_Colormaps, title = "Personalized 2D color maps", author = "Nicholas Waldin and Matthias Bernhard and Ivan Viola", year = "2016", abstract = "2D color maps are often used to visually encode complex data characteristics such as heat or height. The comprehension of color maps in visualization is affected by the display (e.g., a monitor) and the perceptual abilities of the viewer. In this paper we present a novel method to measure a user׳s ability to distinguish colors of a two-dimensional color map on a given monitor. We show how to adapt the color map to the user and display to optimally compensate for the measured deficiencies. Furthermore, we improve user acceptance of the calibration procedure by transforming the calibration into a game. The user has to sort colors along a line in a 3D color space in a competitive fashion. 
The errors the user makes in sorting these lines are used to adapt the color map to his perceptual capabilities.", issn = "0097-8493", journal = "Computers & Graphics", volume = "59", pages = "143--150", keywords = "Color, Perception, Color vision deficiency", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Waldin_Nicholas_2016_Colormaps/", } @inproceedings{Waldin_Nicholas_2016_Individualization, title = "Individualization of 2D Color Maps for People with Color Vision Deficiencies", author = "Nicholas Waldin and Matthias Bernhard and Peter Rautek and Ivan Viola", year = "2016", location = "Smolenice, Slovakia", booktitle = "Proceedings of the 32nd Spring Conference on Computer Graphics", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Waldin_Nicholas_2016_Individualization/", } @article{waldner-2014-af, title = "Attractive Flicker: Guiding Attention in Dynamic Narrative Visualizations", author = "Manuela Waldner and Mathieu Le Muzic and Matthias Bernhard and Werner Purgathofer and Ivan Viola", year = "2014", abstract = "Focus+context techniques provide visual guidance in visualizations by giving strong visual prominence to elements of interest while the context is suppressed. However, finding a visual feature to enhance for the focus to pop out from its context in a large dynamic scene, while leading to minimal visual deformation and subjective disturbance, is challenging. This paper proposes Attractive Flicker, a novel technique for visual guidance in dynamic narrative visualizations. We first show that flicker is a strong visual attractor in the entire visual field, without distorting, suppressing, or adding any scene elements. The novel aspect of our Attractive Flicker technique is that it consists of two signal stages: The first “orientation stage” is a short but intensive flicker stimulus to attract the attention to elements of interest. Subsequently, the intensive flicker is reduced to a minimally disturbing luminance oscillation (“engagement stage”) as visual support to keep track of the focus elements. To find a good trade-off between attraction effectiveness and subjective annoyance caused by flicker, we conducted two perceptual studies to find suitable signal parameters. We showcase Attractive Flicker with the parameters obtained from the perceptual statistics in a study of molecular interactions. With Attractive Flicker, users were able to easily follow the narrative of the visualization on a large display, while the flickering of focus elements was not disturbing when observing the context.", month = dec, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "20", number = "12", pages = "2456--2465", keywords = "Narrative Visualization, Flicker, Visual Attention", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-af/", } @article{birkeland_aasmund_2014_pums, title = "Perceptually Uniform Motion Space", author = "{\AA}smund Birkeland and Cagatay Turkay and Ivan Viola", year = "2014", abstract = "Flow data is often visualized by animated particles inserted into a flow field. The velocity of a particle on the screen is typically linearly scaled by the velocities in the data. However, the perception of velocity magnitude in animated particles is not necessarily linear. We present a study on how different parameters affect relative motion perception. We have investigated the impact of four parameters.
The parameters consist of speed multiplier, direction, contrast type and the global velocity scale. In addition, we investigated whether multiple motion cues and point distribution affect the speed estimation. Several studies were executed to investigate the impact of each parameter. In the initial results, we noticed trends in scale and multiplier. Using the trends for the significant parameters, we designed a compensation model, which adjusts the particle speed to compensate for the effect of the parameters. We then performed a second study to investigate the performance of the compensation model. From the second study we detected a constant estimation error, which we adjusted for in the last study. In addition, we connect our work to established theories in psychophysics by comparing our model to a model based on Stevens’ Power Law.", month = nov, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "20", number = "11", issn = "1077-2626", pages = "1542--1554", keywords = "motion visualization, motion perception, animation, evaluation, perceptual model", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/birkeland_aasmund_2014_pums/", } @article{bernhard-2014-GTOM, title = "Gaze-To-Object Mapping During Visual Search in 3D Virtual Environments", author = "Matthias Bernhard and Efstathios Stavrakis and Michael Hecher and Michael Wimmer", year = "2014", abstract = "Stimuli obtained from highly dynamic 3D virtual environments and synchronous eye-tracking data are commonly used by algorithms that strive to correlate gaze to scene objects, a process referred to as Gaze-To-Object Mapping (GTOM). We propose to address this problem with a probabilistic approach using Bayesian inference. The desired result of the inference is a predicted probability density function (PDF) specifying for each object in the scene a probability to be attended by the user. To evaluate the quality of a predicted attention PDF, we present a methodology to assess the information value (i.e., likelihood) in the predictions of different approaches that can be used to infer object attention. To this end, we propose an experiment based on a visual search task which allows us to determine the object of attention at a certain point in time under controlled conditions. We perform this experiment with a wide range of static and dynamic visual scenes to obtain a ground-truth evaluation data set, allowing us to assess GTOM techniques in a set of 30 particularly challenging cases.", month = aug, journal = "ACM Transactions on Applied Perception (Special Issue SAP 2014)", volume = "11", number = "3", issn = "1544-3558", pages = "14:1--14:17", keywords = "object-based attention, eye-tracking, virtual environments, visual attention", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/bernhard-2014-GTOM/", } @article{hecher-2014-MH, title = "A Comparative Perceptual Study of Soft Shadow Algorithms", author = "Michael Hecher and Matthias Bernhard and Oliver Mattausch and Daniel Scherzer and Michael Wimmer", year = "2014", abstract = "We performed a perceptual user study of algorithms that approximate soft shadows in real time. Although a huge body of soft-shadow algorithms has been proposed, to our knowledge this is the first methodical study for comparing different real-time shadow algorithms with respect to their plausibility and visual appearance.
We evaluated soft-shadow properties like penumbra overlap with respect to their relevance to shadow perception in a systematic way, and we believe that our results can be useful to guide future shadow approaches in their methods of evaluation. In this study, we also capture the predominant case of an inexperienced user observing shadows without comparing to a reference solution, such as when watching a movie or playing a game. One important result of this experiment is to scientifically verify that real-time soft-shadow algorithms, despite having become physically based and very realistic, can nevertheless be intuitively distinguished from a correct solution by untrained users.", month = jun, issn = "1544-3558", journal = "ACM Transactions on Applied Perception", number = "5", volume = "11", pages = "5:1--5:21", keywords = "Perception Studies, Soft Shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/hecher-2014-MH/", } @inproceedings{bernhard-2014-EFD, title = "The Effects of Fast Disparity Adjustments in Gaze-Controlled Stereoscopic Applications", author = "Matthias Bernhard and Camillo Dellmour and Michael Hecher and Efstathios Stavrakis and Michael Wimmer", year = "2014", abstract = "With the emergence of affordable 3D displays, stereoscopy is becoming a commodity. However, users often report discomfort even after brief exposures to stereo content. One of the main reasons is the conflict between vergence and accommodation that is caused by 3D displays. We investigate dynamic adjustment of stereo parameters in a scene using gaze data in order to reduce discomfort. In a user study, we measured stereo fusion times after abrupt manipulation of disparities using gaze data. We found that gaze-controlled manipulation of disparities can lower fusion times for large disparities. In addition, we found that gaze-controlled disparity adjustment should be applied in a personalized manner and ideally performed only at the extremities or outside the comfort zone of subjects. These results provide important insight into the problems associated with fast disparity manipulation and are essential for developing appealing gaze-contingent and gaze-controlled applications.", month = mar, isbn = "978-1-4503-2751-0", publisher = "ACM", location = "Safety Harbor, FL, USA", editor = "Pernilla Qvarfordt and Dan Witzner Hansen", booktitle = "Proceedings of the Symposium on Eye Tracking Research and Applications (ETRA 2014)", pages = "111--118", keywords = "stereoscopic rendering, comfort models, fusion time, eye tracking", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/bernhard-2014-EFD/", } @inproceedings{waldner-2013-facetCloudsGI, title = "FacetClouds: Exploring Tag Clouds for Multi-Dimensional Data", author = "Manuela Waldner and Johann Schrammel and Michael Klein and Katrin Kristjansdottir and Dominik Unger and Manfred Tscheligi", year = "2013", abstract = "Tag clouds are simple yet very widespread representations of how often certain words appear in a collection. In conventional tag clouds, only a single visual text variable is actively controlled: the tags’ font size. Previous work has demonstrated that font size is indeed the most influential visual text variable. However, there are other variables, such as text color, font style and tag orientation, that could be manipulated to encode additional data dimensions. FacetClouds manipulate intrinsic visual text variables to encode multiple data dimensions within a single tag cloud.
We conducted a series of experiments to detect the most appropriate visual text variables for encoding nominal and ordinal values in a cloud with tags of varying font size. Results show that color is the most expressive variable for both data types, and that a combination of tag rotation and background color range leads to the best overall performance when showing multiple data dimensions in a single tag cloud.", month = may, isbn = "978-1-4822-1680-6", publisher = "ACM Publishing House", organization = "ACM Siggraph", location = "Regina, Saskatchewan, Canada", address = "Regina, Saskatchewan, Canada", booktitle = "Proceedings of the 2013 Graphics Interface Conference", pages = "17--24", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/waldner-2013-facetCloudsGI/", } @incollection{sundstedt-2013-vag, title = "Visual Attention and Gaze Behaviour in Games: An Object-Based Approach", author = "Veronica Sundstedt and Matthias Bernhard and Efstathios Stavrakis and Erik Reinhard and Michael Wimmer", year = "2013", abstract = "This chapter presents state-of-the-art methods that tap the potential of psychophysics for the purpose of understanding game players' behavior. Studying gaze behavior in gaming environments has recently gained momentum as it affords a better understanding of gamers' visual attention. However, while knowing where users are attending in a computer game would be useful at a basic level, it does not provide insight into what users are interested in, or why. An answer to these questions can be tremendously useful to game designers, enabling them to improve gameplay, selectively increase visual fidelity, and optimize the distribution of computing resources. Furthermore, this could be useful in verifying game mechanics, improving game AI and smart positioning of advertisements within games, all being applications widely desirable across the games industry. Techniques are outlined to collect gaze data and map fixation points back to semantic objects in a gaming environment, enabling a deeper understanding of how players interact with games.", month = apr, booktitle = "Game Analytics: Maximizing the Value of Player Data", editor = "M. Seif El-Nasr and A. Drachen and A. Canossa and K. Isbister", isbn = "9781447147688", publisher = "Springer", keywords = "Eye Tracking, Visual Attention, Computer Games", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/sundstedt-2013-vag/", } @mastersthesis{hecher-2012-MH, title = "A Comparative Perceptual Study of Soft Shadow Algorithms", author = "Michael Hecher", year = "2012", abstract = "While a huge body of soft shadow algorithms has been proposed, there has been no methodical study for comparing different real-time shadowing algorithms with respect to their plausibility and visual appearance. Therefore, a study was designed to identify and evaluate scene properties with respect to their relevance to shadow quality perception. Since there are so many factors that might influence perception of soft shadows (e.g., complexity of objects, movement, and textures), the study was designed and executed in a way on which future work can build. The evaluation concept not only captures the predominant case of an untrained user experiencing shadows without comparing them to a reference solution, but also the cases of trained and experienced users. We achieve this by reusing the knowledge users gain during the study.
Moreover, we thought that the common approach of a two-option forced-choice study can be frustrating for participants when both choices are so similar that people think they are the same. To tackle this problem, a neutral option was provided. For time-consuming studies, where frustrated participants tend to make arbitrary choices, this is a useful concept. Speaking with participants after the study and evaluating the results supports our choice of a third option. The results are helpful to guide the design of future shadow algorithms and allow researchers to evaluate algorithms more effectively. They also allow developers to make better performance versus quality decisions for their applications. One important result of this study is that we can scientifically verify that, without comparison to a reference solution, human perception is relatively indifferent to the correctness of a soft shadow. Hence, a simple but robust soft shadow algorithm is the better choice in real-world situations. Another finding is that approximating contact hardening in soft shadows is sufficient for the average user and not significantly worse for experts.", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", keywords = "Perception Studies, Soft Shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/hecher-2012-MH/", } @inproceedings{bernhard-2011-maicg, title = "Manipulating Attention in Computer Games", author = "Matthias Bernhard and Le Zhang and Michael Wimmer", year = "2011", abstract = "In computer games, a user’s attention is focused on the current task, and task-irrelevant details remain unnoticed. This behavior, known as inattentional blindness, is a main problem for the optimal placement of information or advertisements. We propose a guiding principle based on Wolfe’s theory of Guided Search, which predicts the saliency of objects during a visual search task. Assuming that computer games elicit visual search tasks frequently, we applied this model in a “reverse” direction: Given a target item (e.g., advertisement) which should be noticed by the user, we choose a frequently searched game item and modify it so that it shares some perceptual features (e.g., color or orientation) with the target item. A memory experiment with 36 participants showed that in an action video game, advertisements were more noticeable to users when this method was applied.", month = jun, isbn = "9781457712852", publisher = "IEEE", location = "Ithaca, NY", booktitle = "Proceedings of the IEEE IVMSP Workshop on Perception and Visual Signal Analysis", pages = "153--158", keywords = "saliency, attention guidance, inattentional blindness, in-game advertising, guided search", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/bernhard-2011-maicg/", }