@inproceedings{steinboeck-2018-lbg, title = "Casual Visual Exploration of Large Bipartite Graphs Using Hierarchical Aggregation and Filtering", author = "Daniel Steinb\"{o}ck and Eduard Gr\"{o}ller and Manuela Waldner", year = "2018", abstract = "Bipartite graphs are typically visualized using linked lists or matrices. However, these classic visualization techniques do not scale well with the number of nodes. Biclustering has been used to aggregate edges, but not to create linked lists with thousands of nodes. In this paper, we present a new casual exploration interface for large, weighted bipartite graphs, which allows for multi-scale exploration through hierarchical aggregation of nodes and edges using biclustering in linked lists. We demonstrate the usefulness of the technique using two data sets: a database of media advertising expenses of public authorities and author-keyword co-occurrences from the IEEE Visualization Publication collection. Through an insight-based study with lay users, we show that the biclustering interface leads to longer exploration times, more insights, and more unexpected findings than a baseline interface using only filtering. However, users also perceive the biclustering interface as more complex.", month = oct, organization = "IEEE", location = "Konstanz, Germany", event = "4th International Symposium on Big Data Visual and Immersive Analytics", booktitle = "International Symposium on Big Data Visual and Immersive Analytics", keywords = "information visualization, bipartite graphs, biclustering, insight-based evaluation", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/steinboeck-2018-lbg/", } @article{mazurek-2018-veq, title = "Visualizing Expanded Query Results", author = "Michael Mazurek and Manuela Waldner", year = "2018", abstract = "When performing queries in web search engines, users often face difficulties choosing appropriate query terms. 
Search engines therefore usually suggest a list of expanded versions of the user query to disambiguate it or to resolve potential term mismatches. However, it has been shown that users find it difficult to choose an expanded query from such a list. In this paper, we describe the adoption of set-based text visualization techniques to visualize how query expansions enrich the result space of a given user query and how the result sets relate to each other. Our system uses a linguistic approach to expand queries and topic modeling to extract the most informative terms from the results of these queries. In a user study, we compare a common text list of query expansion suggestions to three set-based text visualization techniques adopted for visualizing expanded query results – namely, Compact Euler Diagrams, Parallel Tag Clouds, and a List View – to resolve ambiguous queries using interactive query expansion. Our results show that text visualization techniques do not increase retrieval efficiency, precision, or recall. Overall, users rate Parallel Tag Clouds visualizing key terms of the expanded query space lowest. Based on the results, we derive recommendations for visualizations of query expansion results, text visualization techniques in general, and discuss alternative use cases of set-based text visualization techniques in the context of web search.", month = jun, journal = "Computer Graphics Forum", pages = "87--98", keywords = "Information visualization, search interfaces, empirical studies in visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/mazurek-2018-veq/", } @talk{waldner-2018-ved, title = "Visual Data Exploration and Analysis in Emerging Display Environments ", author = "Manuela Waldner", year = "2018", abstract = "Increasingly powerful computing and display hardware open up entirely new ways for visual data exploration and analysis. 
Powerful machines and emerging display environments facilitate novel visual exploration techniques, collaborative data analysis, and even immersion into the scientific data. This talk will address the challenges we faced when bringing biomolecular visual analysis tools and complex molecular visualizations into such large, multi-user environments. A special focus lies on interfaces and attention guidance techniques we designed and evaluated to keep the user oriented and reduce visual clutter. ", month = apr, event = "Emerging Technologies in Scientific Data Visualisation - CECAM", location = "Pisa, Italy", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/waldner-2018-ved/", } @article{polatsek-2018-stv, title = "Exploring visual attention and saliency modeling for task-based visual analysis", author = "Patrik Polatsek and Manuela Waldner and Ivan Viola and Peter Kapec and Wanda Benesova", year = "2018", abstract = "Memory, visual attention and perception play a critical role in the design of visualizations. The way users observe a visualization is affected by salient stimuli in a scene as well as by domain knowledge, interest, and the task. While recent saliency models manage to predict the users’ visual attention in visualizations during exploratory analysis, there is little evidence how much influence bottom-up saliency has on task-based visual analysis. Therefore, we performed an eye-tracking study with 47 users to determine the users’ path of attention when solving three low-level analytical tasks using 30 different charts from the MASSVIS database [1]. We also compared our task-based eye tracking data to the data from the original memorability experiment by Borkin et al. [2]. We found that solving a task leads to more consistent viewing patterns compared to exploratory visual analysis. However, bottom-up saliency of a visualization has negligible influence on users’ fixations and task efficiency when performing a low-level analytical task. 
Also, the efficiency of visual search for an extreme target data point is barely influenced by the target’s bottom-up saliency. Therefore, we conclude that bottom-up saliency models tailored towards information visualization are not suitable for predicting visual attention when performing task-based visual analysis. We discuss potential reasons and suggest extensions to visual attention models to better account for task-based visual analysis.", month = feb, doi = "10.1016/j.cag.2018.01.010", journal = "Computers \& Graphics", number = "2", keywords = "Information visualization, Eye-tracking experiment, Saliency, Visual attention, Low-level analytical tasks", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/polatsek-2018-stv/", } @talk{Waldner_2017_11, title = "Guiding Attention in Complex Visualizations using Flicker", author = "Manuela Waldner", year = "2017", abstract = "Drawing the user’s gaze to an important item in an image or a graphical user interface is a common challenge. Usually, some form of highlighting is used, such as a clearly distinct color or a border around the item. Flicker is also a strong visual attractor in the entire visual field, without distorting, suppressing, or adding any scene elements. While it is very salient, it is often perceived as annoying. In this talk, I will present our research on how flicker can be used as attention guidance technique in cluttered visualizations while lowering its negative side-effects. In particular, I will first present results of studies examining a two-stage flicker technique for dynamic visualizations on large displays. Then, I will present our explorations of high frequency flicker (60 to 72 Hz) to guide the user’s attention in images. At such high frequencies, the critical flicker frequency (CFF) threshold is reached, which makes the flicker appear to fuse into a stable signal. 
However, the CFF is not uniform across the visual field, but is higher in the peripheral vision at normal lighting conditions. We show that high frequency flicker, using personalized attributes like patch size and luminance, can be easily detected by observers in the peripheral vision, but the signal is hardly visible in the foveal vision when users directly look at the flickering patch. We demonstrate that this property can be used to draw the user’s attention to important image regions using a standard high refresh-rate computer monitor with minimal visible modifications to the image.", month = nov, event = "S&T Cooperation Austria-Czech Republic", location = "Czech Technical University", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/Waldner_2017_11/", } @inproceedings{geymayer-2017-std, title = "How Sensemaking Tools Influence Display Space Usage", author = "Thomas Geymayer and Manuela Waldner and Alexander Lex and Dieter Schmalstieg", year = "2017", abstract = "We explore how the availability of a sensemaking tool influences users’ knowledge externalization strategies. On a large display, users were asked to solve an intelligence analysis task with or without a bidirectionally linked concept-graph (BLC) to organize insights into concepts (nodes) and relations (edges). In BLC, both nodes and edges maintain “deep links” to the exact source phrases and sections in associated documents. In our control condition, we were able to reproduce previously described spatial organization behaviors using document windows on the large display. 
When using BLC, however, we found that analysts apply spatial organization to BLC nodes instead, use significantly less display space and have significantly fewer open windows.", month = jun, event = "EuroVis 2017", booktitle = "EuroVis Workshop on Visual Analytics", keywords = "sensemaking, large displays, evaluation", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/geymayer-2017-std/", } @inproceedings{waldner-2017-vph, title = "Exploring Visual Prominence of Multi-Channel Highlighting in Visualizations", author = "Manuela Waldner and Alexey Karimov and Eduard Gr\"{o}ller", year = "2017", abstract = "Visualizations make rich use of multiple visual channels so that there are few resources left to make selected focus elements visually distinct from their surrounding context. A large variety of highlighting techniques for visualizations has been presented in the past, but there has been little systematic evaluation of the design space of highlighting. We explore highlighting from the perspective of visual marks and channels – the basic building blocks of visualizations that are directly controlled by visualization designers. We present the results from two experiments, exploring the visual prominence of highlighted marks in scatterplots: First, using luminance as a single highlight channel, we found that visual prominence is mainly determined by the luminance difference between the focus mark and the brightest context mark. The brightness differences between context marks and the overall brightness level have negligible influence. Second, multi-channel highlighting using luminance and blur leads to a good trade-off between highlight effectiveness and aesthetics. 
From the results, we derive a simple highlight model to balance highlighting across multiple visual channels and focus and context marks, respectively.", month = may, booktitle = "Spring Conference on Computer Graphics 2017", keywords = "information visualization, highlighting, focus+context, visual prominence", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/waldner-2017-vph/", } @article{Waldin_Nicholas_2017_FlickerObserver, title = "Flicker Observer Effect: Guiding Attention Through High Frequency Flicker in Images", author = "Nicholas Waldin and Manuela Waldner and Ivan Viola", year = "2017", abstract = "Drawing the user's gaze to an important item in an image or a graphical user interface is a common challenge. Usually, some form of highlighting is used, such as a clearly distinct color or a border around the item. Flicker can also be very salient, but is often perceived as annoying. In this paper, we explore high frequency flicker (60 to 72 Hz) to guide the user's attention in an image. At such high frequencies, the critical flicker frequency (CFF) threshold is reached, which makes the flicker appear to fuse into a stable signal. However, the CFF is not uniform across the visual field, but is higher in the peripheral vision at normal lighting conditions. Through experiments, we show that high frequency flicker can be easily detected by observers in the peripheral vision, but the signal is hardly visible in the foveal vision when users directly look at the flickering patch. We demonstrate that this property can be used to draw the user's attention to important image regions using a standard high refresh-rate computer monitor with minimal visible modifications to the image. In an uncalibrated visual search task, users could in a crowded image easily spot the specified search targets flickering with very high frequency. 
They also reported that high frequency flicker was distracting when they had to attend to another region, while it was hardly noticeable when looking at the flickering region itself.", month = may, journal = "Computer Graphics Forum", volume = "36", number = "2", pages = "467--476", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/Waldin_Nicholas_2017_FlickerObserver/", } @article{bernhard-2016-gft, title = "The Accuracy of Gauge-Figure Tasks in Monoscopic and Stereo Displays", author = "Matthias Bernhard and Manuela Waldner and Pascal Plank and Veronika Solteszova and Ivan Viola", year = "2016", abstract = "The gauge-figure task (GFT) is a widespread method used to study surface perception for evaluating rendering and visualization techniques. The authors investigate how accurately slant angles probed on well-defined objects align with the ground truth (GT) in monoscopic and stereoscopic displays. Their results show that the GFT probes taken with well-defined objects align well with the GT in the all-monoscopic and all-stereoscopic conditions. However, they found that a GF rendered in stereo over a monoscopic stimulus results in a strong slant underestimation and that an overestimation occurred in the inverse case (monoscopic GF and stereoscopic stimulus). 
They discuss how their findings affect the interpretation of absolute GFT measures, compared to the GT normal.", month = jul, journal = "IEEE Computer Graphics and Applications", number = "4", volume = "36", pages = "56--66", keywords = "computer graphics, gauge-figure task, perceptual visualization, shape perception", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/bernhard-2016-gft/", } @inproceedings{Waldin_Nicholas_2016_Chameleon, title = "Chameleon Dynamic Color Mapping for Multi-Scale Structural Biology Models", author = "Nicholas Waldin and Mathieu Le Muzic and Manuela Waldner and Eduard Gr\"{o}ller and David Goodsell and Ludovic Autin and Ivan Viola", year = "2016", abstract = "Visualization of structural biology data uses color to categorize or separate dense structures into particular semantic units. In multiscale models of viruses or bacteria, there are atoms on the finest level of detail, then amino-acids, secondary structures, macromolecules, up to the compartment level and, in all these levels, elements can be visually distinguished by color. However, currently only single scale coloring schemes are utilized that show information for one particular scale only. We present a novel technology which adaptively, based on the current scale level, adjusts the color scheme to depict or distinguish the currently best visible structural information. We treat the color as a visual resource that is distributed given a particular demand. The changes of the color scheme are seamlessly interpolated between the color scheme from the previous views into a given new one. With such dynamic multi-scale color mapping we ensure that the viewer is able to distinguish structural detail that is shown on any given scale. 
This technique has been tested by users with an expertise in structural biology and has been overall well received.", event = "VCBM", booktitle = "Eurographics Workshop on Visual Computing for Biology and Medicine", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Waldin_Nicholas_2016_Chameleon/", } @inproceedings{lemuzic_2015_timelapse, title = "Illustrative Timelapse: A Technique for Illustrative Visualization of Particle Simulations on the Mesoscale Level", author = "Mathieu Le Muzic and Manuela Waldner and Julius Parulek and Ivan Viola", year = "2015", abstract = "Animated movies are a popular way to communicate complex phenomena in cell biology to the broad audience. Animation artists apply sophisticated illustration techniques to communicate a story, while trying to maintain a realistic representation of a complex dynamic environment. Since such hand-crafted animations are timeconsuming and cost-intensive to create, our goal is to formalize illustration techniques used by artists to facilitate the automatic creation of visualizations generated from mesoscale particle-based molecular simulations. Our technique Illustrative Timelapse supports visual exploration of complex biochemical processes in dynamic environments by (1) seamless temporal zooming to observe phenomena in different temporal resolutions, (2) visual abstraction of molecular trajectories to ensure that observers are able to visually follow the main actors, (3) increased visual focus on events of interest, and (4) lens effects to preserve a realistic representation of the environment in the context. Results from a first user study indicate that visual abstraction of trajectories improves the ability to follow a story and is also appreciated by users. 
Lens effects increased the perceived amount of molecular motion in the environment while trading off traceability of individual molecules.", month = apr, publisher = "IEEE", organization = "8th IEEE Pacific Visualization Symposium (PacificVis 2015)", location = "Zijingang Campus, Zhejiang University, Hangzhou, China", booktitle = "Visualization Symposium (PacificVis), 2015 IEEE Pacific", pages = "247--254", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/lemuzic_2015_timelapse/", } @article{waldner-2014-af, title = "Attractive Flicker: Guiding Attention in Dynamic Narrative Visualizations", author = "Manuela Waldner and Mathieu {Le Muzic} and Matthias Bernhard and Werner Purgathofer and Ivan Viola", year = "2014", abstract = "Focus+context techniques provide visual guidance in visualizations by giving strong visual prominence to elements of interest while the context is suppressed. However, finding a visual feature to enhance for the focus to pop out from its context in a large dynamic scene, while leading to minimal visual deformation and subjective disturbance, is challenging. This paper proposes Attractive Flicker, a novel technique for visual guidance in dynamic narrative visualizations. We first show that flicker is a strong visual attractor in the entire visual field, without distorting, suppressing, or adding any scene elements. The novel aspect of our Attractive Flicker technique is that it consists of two signal stages: The first “orientation stage” is a short but intensive flicker stimulus to attract the attention to elements of interest. Subsequently, the intensive flicker is reduced to a minimally disturbing luminance oscillation (“engagement stage”) as visual support to keep track of the focus elements. To find a good trade-off between attraction effectiveness and subjective annoyance caused by flicker, we conducted two perceptual studies to find suitable signal parameters. 
We showcase Attractive Flicker with the parameters obtained from the perceptual statistics in a study of molecular interactions. With Attractive Flicker, users were able to easily follow the narrative of the visualization on a large display, while the flickering of focus elements was not disturbing when observing the context.", month = dec, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "20", number = "12", pages = "2456--2465", keywords = "Narrative Visualization, Flicker, Visual Attention", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-af/", } @inproceedings{waldner-2014-ghi, title = "Graphical Histories of Information Foraging", author = "Manuela Waldner and Stefan Bruckner and Ivan Viola", year = "2014", abstract = "During information foraging, knowledge workers iteratively seek, filter, read, and extract information. When using multiple information sources and different applications for information processing, re-examination of activities for validation of previous decisions or re-discovery of previously used information sources is challenging. In this paper, we present a novel representation of cross-application histories to support recall of past operations and re-discovery of information resources. Our graphical history consists of a cross-scale visualization combining an overview node-link diagram of used desktop resources with nested (animated) snapshot sequences, based on a recording of the visual screen output during the users’ desktop work. This representation makes key elements of the users’ tasks visually stand out, while exploiting the power of visual memory to recover subtle details of their activities. 
In a preliminary study, users found our graphical history helpful to recall details of an information foraging task and commented positively on the ability to expand overview nodes into snapshot and video sequences.", month = oct, isbn = "978-1-4503-2542-4", publisher = "ACM", organization = "NordiCHI’14 - Nordic Conference on Human-Computer Interaction", location = "Helsinki, Finland", booktitle = "Proceedings of the 8th Nordic Conference on Human-Computer Interaction: Fun, Fast, Foundational ", pages = "295--304", keywords = "Graph visualization, Interaction history, Provenance", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-ghi/", } @misc{lemuzic_2014_ipv, title = "Illustrative Visualization of Biochemical Processes Featuring Multiple Temporal Scales", author = "Mathieu Le Muzic and Julius Parulek and Manuela Waldner and Ivan Viola", year = "2014", abstract = "Scientific illustrators are commonly using structural description of molecular compounds when depicting complex biochemical processes. However, computational biology also provides procedural models describing the function of biological processes which are not currently used in the production pipeline. Instead, animators utilize scientific knowledge to manually animate and reproduce the functioning of cellular biology. We would like to explore the use of such models in order to generate explanatory illustrations that would show how molecular machinery works. Particle-based simulations provide the means for spatially representing the dynamics of biochemical processes. They compute the positions of each single particle and are supposed to mimic a realistic behaviour of the metabolites. Current mesoscale visualization also allows to directly show the results of such simulations by mapping the positions of particles in a virtual 3D environment. 
Nevertheless, some biochemical processes, like the DNA repair for instance, exhibit temporal multiscale aspects because they comprise diffusion rates which are much greater in comparison with reaction rates. As a result, it is challenging to produce a clear and coherent visualization out of this type of simulation. Indeed, when viewing the process at the pace which would let us see the reactions, it becomes impossible for the human eye to keep track of individual elements because of the very large diffusion displacements. On the other hand, if one would playback the simulation slow enough to see a steady motion of individual elements, then only a very few number of reactions would occur in a reasonable amount of time. In this work we propose to solve the problem associated with multiple temporal scales by providing means for spatial. With this approach we aim at showing the two different temporal scales at the same time by using advanced trajectory smoothing mechanism. This would allow us to see individual elements while showing a world full of reactions, hence enabling us to communicate complex biological processes and molecular machineries in a comprehensive way. ", event = "Eurographics Workshop on Visual Computing for Biology", note = "Poster presented at Eurographics Workshop on Visual Computing for Biology (2014-09-04--2014-09-05)", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/lemuzic_2014_ipv/", } @inproceedings{waldner-2013-ubiWM, title = "Towards Ubiquitous Information Space Management", author = "Manuela Waldner and Dieter Schmalstieg", year = "2013", abstract = "Large, high-resolution display spaces are usually created by carefully aligning multiple monitors or projectors to obtain a perfectly flat, rectangular display. In this paper, we suggest the usage of imperfect surfaces as extension of personal workspaces to create ubiquitous, personalized information spaces. 
We identify five environmental factors ubiquitous information spaces need to consider: 1) user location and display visibility, 2) display gaps and holes, 3) corners and non-planarity of the display surface, 4) physical objects within and around the display surface, and 5) non-rectangular display shapes. Instead of compensating for fragmentations and non-planarity of the information space, we propose a ubiquitous information space manager, adapting interaction and window rendering techniques to the above mentioned factors. We hypothesize that knowledge workers will benefit from such ubiquitous information spaces due to increased exploitation of spatial cognition. ", month = may, isbn = "978-1-4503-1952-2", publisher = "ACM", location = "Paris, France", booktitle = "POWERWALL: International Workshop on Interactive, Ultra-High-Resolution Displays, part of the SIGCHI Conference on Human Factors in Computing Systems (2013)", pages = "1--6", keywords = "information management, ubiquitous displays", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/waldner-2013-ubiWM/", } @inproceedings{waldner-2013-facetCloudsGI, title = "FacetClouds: Exploring Tag Clouds for Multi-Dimensional Data", author = "Manuela Waldner and Johann Schrammel and Michael Klein and Katrin Kristjansdottir and Dominik Unger and Manfred Tscheligi", year = "2013", abstract = "Tag clouds are simple yet very widespread representations of how often certain words appear in a collection. In conventional tag clouds, only a single visual text variable is actively controlled: the tags’ font size. Previous work has demonstrated that font size is indeed the most influential visual text variable. However, there are other variables, such as text color, font style and tag orientation, that could be manipulated to encode additional data dimensions. FacetClouds manipulate intrinsic visual text variables to encode multiple data dimensions within a single tag cloud. 
We conducted a series of experiments to detect the most appropriate visual text variables for encoding nominal and ordinal values in a cloud with tags of varying font size. Results show that color is the most expressive variable for both data types, and that a combination of tag rotation and background color range leads to the best overall performance when showing multiple data dimensions in a single tag cloud. ", month = may, isbn = "978-1-4822-1680-6 ", publisher = "ACM Publishing House", organization = "ACM Siggraph", location = "Regina, Saskatchewan, Canada", address = "Regina, Saskatchewan, Canada", booktitle = "Proceedings of the 2013 Graphics Interface Conference", pages = "17--24", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/waldner-2013-facetCloudsGI/", }