@article{grossmann-2022-conceptSplatters, title = "Concept splatters: Exploration of latent spaces based on human interpretable concepts", author = "Nicolas Grossmann and Eduard Gr\"{o}ller and Manuela Waldner", year = "2022", abstract = "Similarity maps show dimensionality-reduced activation vectors of a high number of data points and thereby can help to understand which features a neural network has learned from the data. However, similarity maps have severely limited expressiveness for large datasets with hundreds of thousands of data instances and thousands of labels, such as ImageNet or word2vec. In this work, we present ``concept splatters'' as a scalable method to interactively explore similarities between data instances as learned by the machine through the lens of human-understandable semantics. Our approach enables interactive exploration of large latent spaces on multiple levels of abstraction. We present a web-based implementation that supports interactive exploration of tens of thousands of word vectors of word2vec and CNN feature vectors of ImageNet. In a qualitative study, users could effectively discover spurious learning strategies of the network, ambiguous labels, and could characterize reasons for potential confusion.", month = apr, doi = "10.1016/j.cag.2022.04.013", issn = "1873-7684", journal = "Computers and Graphics", volume = "105", publisher = "Elsevier", pages = "73--84", keywords = "Concept spaces, Latent spaces, Similarity maps, Visual exploratory analysis", URL = "https://www.cg.tuwien.ac.at/research/publications/2022/grossmann-2022-conceptSplatters/", } @article{Alharbi_2021, title = "Nanotilus: Generator of Immersive Guided-Tours in Crowded 3D Environments", author = "Ruwayda Alharbi and Ondrej Strnad and Laura R. 
Luidolt and Manuela Waldner and David Kou\v{r}il and Ciril Bohak and Tobias Klein and Eduard Gr\"{o}ller and Ivan Viola", year = "2021", abstract = "Immersive virtual reality environments are gaining popularity for studying and exploring crowded three-dimensional structures. When reaching very high structural densities, the natural depiction of the scene produces impenetrable clutter and requires visibility and occlusion management strategies for exploration and orientation. Strategies developed to address the crowdedness in desktop applications, however, inhibit the feeling of immersion. They result in nonimmersive, desktop-style outside-in viewing in virtual reality. This paper proposes Nanotilus---a new visibility and guidance approach for very dense environments that generates an endoscopic inside-out experience instead of outside-in viewing, preserving the immersive aspect of virtual reality. The approach consists of two novel, tightly coupled mechanisms that control scene sparsification simultaneously with camera path planning. The sparsification strategy is localized around the camera and is realized as a multiscale, multishell, variety-preserving technique. When Nanotilus dives into the structures to capture internal details residing on multiple scales, it guides the camera using depth-based path planning. In addition to sparsification and path planning, we complete the tour generation with an animation controller, textual annotation, and text-to-visualization conversion. We demonstrate the generated guided tours on mesoscopic biological models -- SARS-CoV-2 and HIV viruses. We evaluate the Nanotilus experience with a baseline outside-in sparsification and navigational technique in a formal user study with 29 participants. 
While users can maintain a better overview using the outside-in sparsification, the study confirms our hypothesis that Nanotilus leads to stronger engagement and immersion.", month = dec, doi = "10.1109/TVCG.2021.3133592", journal = "IEEE Transactions on Visualization and Computer Graphics", pages = "1--16", URL = "https://www.cg.tuwien.ac.at/research/publications/2021/Alharbi_2021/", } @article{sorger-2021-egonet, title = "Egocentric Network Exploration for Immersive Analytics", author = "Johannes Sorger and Alessio Arleo and Peter K\'{a}n and Wolfgang Knecht and Manuela Waldner", year = "2021", abstract = "To exploit the potential of immersive network analytics for engaging and effective exploration, we promote the metaphor of ``egocentrism'', where data depiction and interaction are adapted to the perspective of the user within a 3D network. Egocentrism has the potential to overcome some of the inherent downsides of virtual environments, e.g., visual clutter and cyber-sickness. To investigate the effect of this metaphor on immersive network exploration, we designed and evaluated interfaces of varying degrees of egocentrism. In a user study, we evaluated the effect of these interfaces on visual search tasks, efficiency of network traversal, spatial orientation, as well as cyber-sickness. Results show that a simple egocentric interface considerably improves visual search efficiency and navigation performance, yet does not decrease spatial orientation or increase cyber-sickness. A distorted occlusion-free view of the neighborhood only marginally improves the user's performance. 
We tie our findings together in an open online tool for egocentric network exploration, providing actionable insights on the benefits of the egocentric network exploration metaphor.", month = oct, journal = "Computer Graphics Forum", volume = "40", doi = "10.1111/cgf.14417", publisher = "John Wiley and Sons", pages = "241--252", keywords = "Computer Graphics and Computer-Aided Design", URL = "https://www.cg.tuwien.ac.at/research/publications/2021/sorger-2021-egonet/", } @article{sietzen-2021-perturber, title = "Interactive Analysis of CNN Robustness", author = "Stefan Sietzen and Mathias Lechner and Judy Borowski and Ramin Hasani and Manuela Waldner", year = "2021", abstract = "While convolutional neural networks (CNNs) have found wide adoption as state-of-the-art models for image-related tasks, their predictions are often highly sensitive to small input perturbations, which the human vision is robust against. This paper presents Perturber, a web-based application that allows users to instantaneously explore how CNN activations and predictions evolve when a 3D input scene is interactively perturbed. Perturber offers a large variety of scene modifications, such as camera controls, lighting and shading effects, background modifications, object morphing, as well as adversarial attacks, to facilitate the discovery of potential vulnerabilities. Fine-tuned model versions can be directly compared for qualitative evaluation of their robustness. Case studies with machine learning experts have shown that Perturber helps users to quickly generate hypotheses about model vulnerabilities and to qualitatively compare model behavior. Using quantitative analyses, we could replicate users' insights with other CNN architectures and input images, yielding new insights about the vulnerability of adversarially trained models. 
", month = oct, journal = "Computer Graphics Forum", volume = "40", doi = "10.1111/cgf.14418", publisher = "John Wiley and Sons", pages = "253--264", keywords = "Computer Graphics and Computer-Aided Design", URL = "https://www.cg.tuwien.ac.at/research/publications/2021/sietzen-2021-perturber/", } @inproceedings{grossmann-2021-layout, title = "Does the Layout Really Matter? A Study on Visual Model Accuracy Estimation", author = "Nicolas Grossmann and J\"{u}rgen Bernard and Michael Sedlmair and Manuela Waldner", year = "2021", abstract = "In visual interactive labeling, users iteratively assign labels to data items until the machine model reaches an acceptable accuracy. A crucial step of this process is to inspect the model's accuracy and decide whether it is necessary to label additional elements. In scenarios with no or very little labeled data, visual inspection of the predictions is required. Similarity-preserving scatterplots created through a dimensionality reduction algorithm are a common visualization that is used in these cases. Previous studies investigated the effects of layout and image complexity on tasks like labeling. However, model evaluation has not been studied systematically. We present the results of an experiment studying the influence of image complexity and visual grouping of images on model accuracy estimation. We found that users outperform traditional automated approaches when estimating a model's accuracy. 
Furthermore, while the complexity of images impacts the overall performance, the layout of the items in the plot has little to no effect on estimations.", month = oct, publisher = "IEEE Computer Society Press", event = "IEEE Visualization Conference (VIS)", doi = "10.1109/VIS49827.2021.9623326", booktitle = "IEEE Visualization Conference (VIS)", pages = "61--65", URL = "https://www.cg.tuwien.ac.at/research/publications/2021/grossmann-2021-layout/", } @article{wu-2021-vi, title = "Visualization working group at TU Wien: Visibile Facimus Quod Ceteri Non Possunt", author = "Hsiang-Yun Wu and Aleksandr Amirkhanov and Nicolas Grossmann and Tobias Klein and David Kou\v{r}il and Haichao Miao and Laura R. Luidolt and Peter Mindek and Renata Raidou and Ivan Viola and Manuela Waldner and Eduard Gr\"{o}ller", year = "2021", abstract = "Building-up and running a university-based research group is a multi-faceted undertaking. The visualization working group at TU Wien (vis-group) has been internationally active over more than 25 years. The group has been acting in a competitive scientific setting where sometimes contradicting multiple objectives require trade-offs and optimizations. Research-wise the group has been performing basic and applied research in visualization and visual computing. Teaching-wise the group has been involved in undergraduate and graduate lecturing in (medical) visualization and computer graphics. To be scientifically competitive requires to constantly expose the group and its members to a strong international competition at the highest level. This necessitates to shield the members against the ensuing pressures and demands and provide (emotional) support and encouragement. Internally, the vis-group has developed a unique professional and social interaction culture: work and celebrate, hard and together. 
This has crystallized into a nested, recursive, and triangular organization model, which concretizes what it takes to make a research group successful. The key elements are the creative and competent vis-group members who collaboratively strive for (scientific) excellence in a socially enjoyable environment.", month = mar, doi = "10.1016/j.visinf.2021.02.003", journal = "Visual Informatics", volume = "5", pages = "76--84", URL = "https://www.cg.tuwien.ac.at/research/publications/2021/wu-2021-vi/", } @article{waldner-2021-leo, title = "Linking unstructured evidence to structured observations", author = "Manuela Waldner and Thomas Geymayer and Dieter Schmalstieg and Michael Sedlmair", year = "2021", abstract = "Many professionals, like journalists, writers, or consultants, need to acquire information from various sources, make sense of this unstructured evidence, structure their observations, and finally create and deliver their product, such as a report or a presentation. In formative interviews, we found that tools allowing structuring of observations are often disconnected from the corresponding evidence. Therefore, we designed a sensemaking environment with a flexible observation graph that visually ties together evidence in unstructured documents with the user's structured knowledge. This is achieved through bi-directional deep links between highlighted document portions and nodes in the observation graph. In a controlled study, we compared users' sensemaking strategies using either the observation graph or a simple text editor on a large display. Results show that the observation graph represents a holistic, compact representation of users' observations, which can be linked to unstructured evidence on demand. In contrast, users taking textual notes required much more display space to spatially organize source documents containing unstructured evidence. 
This implies that spatial organization is a powerful strategy to structure observations even if the available space is limited.", month = jan, doi = "10.1177/1473871620986249", journal = "Information Visualization", keywords = "mind map, concept map, observation graph, visual links, sensemaking", URL = "https://www.cg.tuwien.ac.at/research/publications/2021/waldner-2021-leo/", } @article{waldner-2020-tbg, title = "Interactive exploration of large time-dependent bipartite graphs", author = "Manuela Waldner and Daniel Steinb\"{o}ck and Eduard Gr\"{o}ller", year = "2020", abstract = "Bipartite graphs are typically visualized using linked lists or matrices, but these visualizations neither scale well nor do they convey temporal development. We present a new interactive exploration interface for large, time-dependent bipartite graphs. We use two clustering techniques to build a hierarchical aggregation supporting different exploration strategies. Aggregated nodes and edges are visualized as linked lists with nested time series. We demonstrate two use cases: finding advertising expenses of public authorities following similar temporal patterns and comparing author-keyword co-occurrences across time. 
Through a user study, we show that linked lists with hierarchical aggregation lead to more insights than without.", month = apr, doi = "10.1016/j.cola.2020.100959", journal = "Journal of Computer Languages", volume = "57", keywords = "Information visualization, Bipartite graphs, Clustering, Time series data, Insight-based evaluation", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/waldner-2020-tbg/", } @article{reina-2020-mtv, title = "The moving target of visualization software for an increasingly complex world", author = "Guido Reina and Hank Childs and Kresimir Matkovic and Katja B\"{u}hler and Manuela Waldner and David Pugmire and Barbora Kozlikova and Timo Ropinski and Patric Ljung and Takayuki Itoh and Eduard Gr\"{o}ller and Michael Krone", year = "2020", abstract = "Visualization has evolved into a mature scientific field and it has also become widely accepted as a standard approach in diverse fields, including physics, life sciences, and business intelligence. However, despite its successful development, there are still many open research questions that require customized implementations in order to explore and establish concepts, and to perform experiments and take measurements. Many methods and tools have been developed and published but most are stand-alone prototypes and have not reached a mature state that can be used in a reliable manner by collaborating domain scientists or a wider audience. In this study, we discuss the challenges, solutions, and open research questions that affect the development of sophisticated, relevant, and novel scientific visualization solutions with minimum overheads. 
We summarize and discuss the results of a recent National Institute of Informatics Shonan seminar on these topics.", month = apr, doi = "10.1016/j.cag.2020.01.005", journal = "Computers \& Graphics", volume = "87", pages = "12--29", keywords = "Software engineering, Visualization, Visualization community, Visualization research, Visualization software", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/reina-2020-mtv/", } @inproceedings{sorger-2019-odn, title = "Immersive Analytics of Large Dynamic Networks via Overview and Detail Navigation", author = "Johannes Sorger and Manuela Waldner and Wolfgang Knecht and Alessio Arleo", year = "2019", abstract = "Analysis of large dynamic networks is a thriving research field, typically relying on 2D graph representations. The advent of affordable head mounted displays sparked new interest in the potential of 3D visualization for immersive network analytics. Nevertheless, most solutions do not scale well with the number of nodes and edges and rely on conventional fly- or walk-through navigation. In this paper, we present a novel approach for the exploration of large dynamic graphs in virtual reality that interweaves two navigation metaphors: overview exploration and immersive detail analysis. We thereby use the potential of state-of-the-art VR headsets, coupled with a web-based 3D rendering engine that supports heterogeneous input modalities to enable ad-hoc immersive network analytics. 
We validate our approach through a performance evaluation and a case study with experts analyzing medical data.", month = dec, organization = "IEEE", location = "San Diego, California, USA", event = "AIVR 2019", booktitle = "2nd International Conference on Artificial Intelligence \& Virtual Reality", pages = "144--151", keywords = "Immersive Network Analytics, Web-Based Visualization, Dynamic Graph Visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/sorger-2019-odn/", } @article{waldner-2019-rld, title = "A Comparison of Radial and Linear Charts for Visualizing Daily Patterns", author = "Manuela Waldner and Alexandra Diehl and Denis Gracanin and Rainer Splechtna and Claudio Delrieux and Kresimir Matkovic", year = "2019", abstract = "Radial charts are generally considered less effective than linear charts. Perhaps the only exception is in visualizing periodical time-dependent data, which is believed to be naturally supported by the radial layout. It has been demonstrated that the drawbacks of radial charts outweigh the benefits of this natural mapping. Visualization of daily patterns, as a special case, has not been systematically evaluated using radial charts. In contrast to yearly or weekly recurrent trends, the analysis of daily patterns on a radial chart may benefit from our trained skill on reading radial clocks that are ubiquitous in our culture. In a crowd-sourced experiment with 92 non-expert users, we evaluated the accuracy, efficiency, and subjective ratings of radial and linear charts for visualizing daily traffic accident patterns. We systematically compared juxtaposed 12-hours variants and single 24-hours variants for both layouts in four low-level tasks and one high-level interpretation task. Our results show that over all tasks, the most elementary 24-hours linear bar chart is most accurate and efficient and is also preferred by the users. 
This provides strong evidence for the use of linear layouts – even for visualizing periodical daily patterns.", month = oct, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "26", doi = "10.1109/TVCG.2019.2934784", pages = "1033--1042", keywords = "radial charts, time series data, daily patterns, crowd-sourced experiment", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/waldner-2019-rld/", } @inproceedings{sietzen-ifv-2019, title = "Interactive Feature Visualization in the Browser", author = "Stefan Sietzen and Manuela Waldner", year = "2019", abstract = "Excellent explanations of feature visualization already exist in the form of interactive articles, e.g. DeepDream, Feature Visualization, The Building Blocks of Interpretability, Activation Atlas, Visualizing GoogLeNet Classes. They mostly rely on curated prerendered visualizations, additionally providing colab notebooks or public repositories allowing the reader to reproduce those results. While precalculated visualizations have many advantages (directability, more processing budget), they are always discretized samples of a continuous parameter space. In the spirit of Tensorflow Playground, this project aims at providing a fully interactive interface to some basic functionality of the originally Python-based Lucid library, roughly corresponding to the concepts presented in the “Feature Visualization" article. The user is invited to explore the effect of parameter changes in a playful way and without requiring any knowledge of programming, enabled by an implementation on top of TensorFlow.js. Live updates of the generated input image as well as feature map activations should give the user a visual intuition to the otherwise abstract optimization process. 
Further, this interface opens the domain of feature visualization to non-experts, as no scripting is required.", month = oct, booktitle = "Proceedings of the Workshop on Visualization for AI explainability (VISxAI)", editor = "El-Assady, Mennatallah and Chau, Duen Horng (Polo) and Hohman, Fred and Perer, Adam and Strobelt, Hendrik and Vi\'{e}gas, Fernanda", location = "Vancouver", event = "Workshop on Visualization for AI explainability (VISxAI) at IEEE VIS 2019", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/sietzen-ifv-2019/", } @techreport{2019-ic, title = "Collecting and Structuring Information in the Information Collage", author = "Sebastian Sippl and Michael Sedlmair and Manuela Waldner", year = "2019", abstract = "Knowledge workers, such as scientists, journalists, or consultants, adaptively seek, gather, and consume information. These processes are often inefficient as existing user interfaces provide limited possibilities to combine information from various sources and different formats into a common knowledge representation. In this paper, we present the concept of an information collage (IC) -- a web browser extension combining manual spatial organization of gathered information fragments and automatic text analysis for interactive content exploration and expressive visual summaries. We used IC for case studies with knowledge workers from different domains and longer-term field studies over a period of one month. We identified three different ways how users collect and structure information and provide design recommendations how to support these observed usage strategies. 
", month = aug, number = "TR-193-02-2019-2", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien ", note = "human contact: technical-report@cg.tuwien.ac.at", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/2019-ic/", } @article{byska-2019-mdfc, title = "Analysis of Long Molecular Dynamics Simulations Using Interactive Focus+Context Visualization", author = "Jan Byska and Thomas Trautner and Sergio Marques and Jiri Damborsky and Barbora Kozlikova and Manuela Waldner", year = "2019", abstract = "Analyzing molecular dynamics (MD) simulations is a key aspect to understand protein dynamics and function. With increasing computational power, it is now possible to generate very long and complex simulations, which are cumbersome to explore using traditional 3D animations of protein movements. Guided by requirements derived from multiple focus groups with protein engineering experts, we designed and developed a novel interactive visual analysis approach for long and crowded MD simulations. In this approach, we link a dynamic 3D focus+context visualization with a 2D chart of time series data to guide the detection and navigation towards important spatio-temporal events. The 3D visualization renders elements of interest in more detail and increases the temporal resolution dependent on the time series data or the spatial region of interest. In case studies with different MD simulation data sets and research questions, we found that the proposed visual analysis approach facilitates exploratory analysis to generate, confirm, or reject hypotheses about causalities. 
Finally, we derived design guidelines for interactive visual analysis of complex MD simulation data.", month = jun, journal = "Computer Graphics Forum", volume = "38", number = "3", doi = "10.1111/cgf.13701", pages = "441--453", keywords = "scientific visualization, user centered design", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/byska-2019-mdfc/", } @article{waldin-2019-ccm, title = "Cuttlefish: Color Mapping for Dynamic Multi‐Scale Visualizations", author = "Nicholas Waldin and Manuela Waldner and Mathieu Le Muzic and Eduard Gr\"{o}ller and David Goodsell and Ludovic Autin and Arthur Olson and Ivan Viola", year = "2019", abstract = "Visualizations of hierarchical data can often be explored interactively. For example, in geographic visualization, there are continents, which can be subdivided into countries, states, counties and cities. Similarly, in models of viruses or bacteria at the highest level are the compartments, and below that are macromolecules, secondary structures (such as α‐helices), amino‐acids, and on the finest level atoms. Distinguishing between items can be assisted through the use of color at all levels. However, currently, there are no hierarchical and adaptive color mapping techniques for very large multi‐scale visualizations that can be explored interactively. We present a novel, multi‐scale, color‐mapping technique for adaptively adjusting the color scheme to the current view and scale. Color is treated as a resource and is smoothly redistributed. The distribution adjusts to the scale of the currently observed detail and maximizes the color range utilization given current viewing requirements. Thus, we ensure that the user is able to distinguish items on any level, even if the color is not constant for a particular feature. The coloring technique is demonstrated for a political map and a mesoscale structural model of HIV. 
The technique has been tested by users with expertise in structural biology and was overall well received.", month = mar, doi = "10.1111/cgf.13611", journal = "Computer Graphics Forum", number = "6", volume = "38", pages = "150--164", keywords = "multiscale visualization, illustrative visualization, molecular visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/waldin-2019-ccm/", } @inproceedings{steinboeck-2018-lbg, title = "Casual Visual Exploration of Large Bipartite Graphs Using Hierarchical Aggregation and Filtering", author = "Daniel Steinb\"{o}ck and Eduard Gr\"{o}ller and Manuela Waldner", year = "2018", abstract = "Bipartite graphs are typically visualized using linked lists or matrices. However, these classic visualization techniques do not scale well with the number of nodes. Biclustering has been used to aggregate edges, but not to create linked lists with thousands of nodes. In this paper, we present a new casual exploration interface for large, weighted bipartite graphs, which allows for multi-scale exploration through hierarchical aggregation of nodes and edges using biclustering in linked lists. We demonstrate the usefulness of the technique using two data sets: a database of media advertising expenses of public authorities and author-keyword co-occurrences from the IEEE Visualization Publication collection. Through an insight-based study with lay users, we show that the biclustering interface leads to longer exploration times, more insights, and more unexpected findings than a baseline interface using only filtering. 
However, users also perceive the biclustering interface as more complex.", month = oct, organization = "IEEE", location = "Konstanz, Germany", event = "4th International Symposium on Big Data Visual and Immersive Analytics", booktitle = "International Symposium on Big Data Visual and Immersive Analytics", keywords = "information visualization, bipartite graphs, biclustering, insight-based evaluation", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/steinboeck-2018-lbg/", } @article{mazurek-2018-veq, title = "Visualizing Expanded Query Results", author = "Michael Mazurek and Manuela Waldner", year = "2018", abstract = "When performing queries in web search engines, users often face difficulties choosing appropriate query terms. Search engines therefore usually suggest a list of expanded versions of the user query to disambiguate it or to resolve potential term mismatches. However, it has been shown that users find it difficult to choose an expanded query from such a list. In this paper, we describe the adoption of set-based text visualization techniques to visualize how query expansions enrich the result space of a given user query and how the result sets relate to each other. Our system uses a linguistic approach to expand queries and topic modeling to extract the most informative terms from the results of these queries. In a user study, we compare a common text list of query expansion suggestions to three set-based text visualization techniques adopted for visualizing expanded query results – namely, Compact Euler Diagrams, Parallel Tag Clouds, and a List View – to resolve ambiguous queries using interactive query expansion. Our results show that text visualization techniques do not increase retrieval efficiency, precision, or recall. Overall, users rate Parallel Tag Clouds visualizing key terms of the expanded query space lowest. 
Based on the results, we derive recommendations for visualizations of query expansion results, text visualization techniques in general, and discuss alternative use cases of set-based text visualization techniques in the context of web search.", month = jun, journal = "Computer Graphics Forum", pages = "87--98", keywords = "Information visualization, search interfaces, empirical studies in visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/mazurek-2018-veq/", } @talk{waldner-2018-ved, title = "Visual Data Exploration and Analysis in Emerging Display Environments ", author = "Manuela Waldner", year = "2018", abstract = "Increasingly powerful computing and display hardware open up entirely new ways for visual data exploration and analysis. Powerful machines and emerging display environments facilitate novel visual exploration techniques, collaborative data analysis, and even immersion into the scientific data. This talk will address the challenges we faced when bringing biomolecular visual analysis tools and complex molecular visualizations into such large, multi-user environments. A special focus lies on interfaces and attention guidance techniques we designed and evaluated to keep the user oriented and reduce visual clutter. ", month = apr, event = "Emerging Technologies in Scientific Data Visualisation - CECAM", location = "Pisa, Italy", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/waldner-2018-ved/", } @article{polatsek-2018-stv, title = "Exploring visual attention and saliency modeling for task-based visual analysis", author = "Patrik Polatsek and Manuela Waldner and Ivan Viola and Peter Kapec and Wanda Benesova", year = "2018", abstract = "Memory, visual attention and perception play a critical role in the design of visualizations. The way users observe a visualization is affected by salient stimuli in a scene as well as by domain knowledge, interest, and the task. 
While recent saliency models manage to predict the users' visual attention in visualizations during exploratory analysis, there is little evidence how much influence bottom-up saliency has on task-based visual analysis. Therefore, we performed an eye-tracking study with 47 users to determine the users' path of attention when solving three low-level analytical tasks using 30 different charts from the MASSVIS database [1]. We also compared our task-based eye tracking data to the data from the original memorability experiment by Borkin et al. [2]. We found that solving a task leads to more consistent viewing patterns compared to exploratory visual analysis. However, bottom-up saliency of a visualization has negligible influence on users' fixations and task efficiency when performing a low-level analytical task. Also, the efficiency of visual search for an extreme target data point is barely influenced by the target's bottom-up saliency. Therefore, we conclude that bottom-up saliency models tailored towards information visualization are not suitable for predicting visual attention when performing task-based visual analysis. We discuss potential reasons and suggest extensions to visual attention models to better account for task-based visual analysis.", month = feb, doi = "10.1016/j.cag.2018.01.010", journal = "Computers \& Graphics", number = "2", keywords = "Information visualization, Eye-tracking experiment, Saliency, Visual attention, Low-level analytical tasks", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/polatsek-2018-stv/", } @talk{Waldner_2017_11, title = "Guiding Attention in Complex Visualizations using Flicker", author = "Manuela Waldner", year = "2017", abstract = "Drawing the user's gaze to an important item in an image or a graphical user interface is a common challenge. Usually, some form of highlighting is used, such as a clearly distinct color or a border around the item. 
Flicker is also a strong visual attractor in the entire visual field, without distorting, suppressing, or adding any scene elements. While it is very salient, it is often perceived as annoying. In this talk, I will present our research on how flicker can be used as attention guidance technique in cluttered visualizations while lowering its negative side-effects. In particular, I will first present results of studies examining a two-stage flicker technique for dynamic visualizations on large displays. Then, I will present our explorations of high frequency flicker (60 to 72 Hz) to guide the user’s attention in images. At such high frequencies, the critical flicker frequency (CFF) threshold is reached, which makes the flicker appear to fuse into a stable signal. However, the CFF is not uniform across the visual field, but is higher in the peripheral vision at normal lighting conditions. We show that high frequency flicker, using personalized attributes like patch size and luminance, can be easily detected by observers in the peripheral vision, but the signal is hardly visible in the foveal vision when users directly look at the flickering patch. We demonstrate that this property can be used to draw the user’s attention to important image regions using a standard high refresh-rate computer monitor with minimal visible modifications to the image.", month = nov, event = "S\&T Cooperation Austria-Czech Republic", location = "Czech Technical University", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/Waldner_2017_11/", } @inproceedings{geymayer-2017-std, title = "How Sensemaking Tools Influence Display Space Usage", author = "Thomas Geymayer and Manuela Waldner and Alexander Lex and Dieter Schmalstieg", year = "2017", abstract = "We explore how the availability of a sensemaking tool influences users’ knowledge externalization strategies. 
On a large display, users were asked to solve an intelligence analysis task with or without a bidirectionally linked concept-graph (BLC) to organize insights into concepts (nodes) and relations (edges). In BLC, both nodes and edges maintain “deep links” to the exact source phrases and sections in associated documents. In our control condition, we were able to reproduce previously described spatial organization behaviors using document windows on the large display. When using BLC, however, we found that analysts apply spatial organization to BLC nodes instead, use significantly less display space and have significantly fewer open windows.", month = jun, event = "EuroVis 2017", booktitle = "EuroVis Workshop on Visual Analytics", keywords = "sensemaking, large displays, evaluation", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/geymayer-2017-std/", } @article{Waldin_Nicholas_2017_FlickerObserver, title = "Flicker Observer Effect: Guiding Attention Through High Frequency Flicker in Images", author = "Nicholas Waldin and Manuela Waldner and Ivan Viola", year = "2017", abstract = "Drawing the user's gaze to an important item in an image or a graphical user interface is a common challenge. Usually, some form of highlighting is used, such as a clearly distinct color or a border around the item. Flicker can also be very salient, but is often perceived as annoying. In this paper, we explore high frequency flicker (60 to 72 Hz) to guide the user's attention in an image. At such high frequencies, the critical flicker frequency (CFF) threshold is reached, which makes the flicker appear to fuse into a stable signal. However, the CFF is not uniform across the visual field, but is higher in the peripheral vision at normal lighting conditions. Through experiments, we show that high frequency flicker can be easily detected by observers in the peripheral vision, but the signal is hardly visible in the foveal vision when users directly look at the flickering patch. 
We demonstrate that this property can be used to draw the user's attention to important image regions using a standard high refresh-rate computer monitor with minimal visible modifications to the image. In an uncalibrated visual search task, users could in a crowded image easily spot the specified search targets flickering with very high frequency. They also reported that high frequency flicker was distracting when they had to attend to another region, while it was hardly noticeable when looking at the flickering region itself.", month = may, journal = "Computer Graphics Forum", volume = "36", number = "2", pages = "467--476", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/Waldin_Nicholas_2017_FlickerObserver/", } @inproceedings{waldner-2017-vph, title = "Exploring Visual Prominence of Multi-Channel Highlighting in Visualizations", author = "Manuela Waldner and Alexey Karimov and Eduard Gr\"{o}ller", year = "2017", abstract = "Visualizations make rich use of multiple visual channels so that there are few resources left to make selected focus elements visually distinct from their surrounding context. A large variety of highlighting techniques for visualizations has been presented in the past, but there has been little systematic evaluation of the design space of highlighting. We explore highlighting from the perspective of visual marks and channels – the basic building blocks of visualizations that are directly controlled by visualization designers. We present the results from two experiments, exploring the visual prominence of highlighted marks in scatterplots: First, using luminance as a single highlight channel, we found that visual prominence is mainly determined by the luminance difference between the focus mark and the brightest context mark. The brightness differences between context marks and the overall brightness level have negligible influence. 
Second, multi-channel highlighting using luminance and blur leads to a good trade-off between highlight effectiveness and aesthetics. From the results, we derive a simple highlight model to balance highlighting across multiple visual channels and focus and context marks, respectively.", month = may, booktitle = "Spring Conference on Computer Graphics 2017", keywords = "information visualization, highlighting, focus+context, visual prominence", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/waldner-2017-vph/", } @article{bernhard-2016-gft, title = "The Accuracy of Gauge-Figure Tasks in Monoscopic and Stereo Displays", author = "Matthias Bernhard and Manuela Waldner and Pascal Plank and Veronika Solteszova and Ivan Viola", year = "2016", abstract = "The gauge-figure task (GFT) is a widespread method used to study surface perception for evaluating rendering and visualization techniques. The authors investigate how accurately slant angles probed on well-defined objects align with the ground truth (GT) in monoscopic and stereoscopic displays. Their results show that the GFT probes taken with well-defined objects align well with the GT in the all-monoscopic and all-stereoscopic conditions. However, they found that a GF rendered in stereo over a monoscopic stimulus results in a strong slant underestimation and that an overestimation occurred in the inverse case (monoscopic GF and stereoscopic stimulus). 
They discuss how their findings affect the interpretation of absolute GFT measures, compared to the GT normal.", month = jul, journal = "IEEE Computer Graphics and Applications", number = "4", volume = "36", pages = "56--66", keywords = "computer graphics, gauge-figure task, perceptual visualization, shape perception", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/bernhard-2016-gft/", } @inproceedings{Waldin_Nicholas_2016_Chameleon, title = "Chameleon Dynamic Color Mapping for Multi-Scale Structural Biology Models", author = "Nicholas Waldin and Mathieu Le Muzic and Manuela Waldner and Eduard Gr\"{o}ller and David Goodsell and Ludovic Autin and Ivan Viola", year = "2016", abstract = "Visualization of structural biology data uses color to categorize or separate dense structures into particular semantic units. In multiscale models of viruses or bacteria, there are atoms on the finest level of detail, then amino-acids, secondary structures, macromolecules, up to the compartment level and, in all these levels, elements can be visually distinguished by color. However, currently only single scale coloring schemes are utilized that show information for one particular scale only. We present a novel technology which adaptively, based on the current scale level, adjusts the color scheme to depict or distinguish the currently best visible structural information. We treat the color as a visual resource that is distributed given a particular demand. The changes of the color scheme are seamlessly interpolated between the color scheme from the previous views into a given new one. With such dynamic multi-scale color mapping we ensure that the viewer is able to distinguish structural detail that is shown on any given scale. 
This technique has been tested by users with an expertise in structural biology and has been overall well received.", event = "VCBM", booktitle = "Eurographics Workshop on Visual Computing for Biology and Medicine", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Waldin_Nicholas_2016_Chameleon/", } @inproceedings{lemuzic_2015_timelapse, title = "Illustrative Timelapse: A Technique for Illustrative Visualization of Particle Simulations on the Mesoscale Level", author = "Mathieu Le Muzic and Manuela Waldner and Julius Parulek and Ivan Viola", year = "2015", abstract = "Animated movies are a popular way to communicate complex phenomena in cell biology to the broad audience. Animation artists apply sophisticated illustration techniques to communicate a story, while trying to maintain a realistic representation of a complex dynamic environment. Since such hand-crafted animations are timeconsuming and cost-intensive to create, our goal is to formalize illustration techniques used by artists to facilitate the automatic creation of visualizations generated from mesoscale particle-based molecular simulations. Our technique Illustrative Timelapse supports visual exploration of complex biochemical processes in dynamic environments by (1) seamless temporal zooming to observe phenomena in different temporal resolutions, (2) visual abstraction of molecular trajectories to ensure that observers are able to visually follow the main actors, (3) increased visual focus on events of interest, and (4) lens effects to preserve a realistic representation of the environment in the context. Results from a first user study indicate that visual abstraction of trajectories improves the ability to follow a story and is also appreciated by users. 
Lens effects increased the perceived amount of molecular motion in the environment while trading off traceability of individual molecules.", month = apr, publisher = "IEEE", organization = "8th IEEE Pacific Visualization Symposium (PacificVis 2015)", location = "Zijingang Campus, Zhejiang University, Hangzhou, China", booktitle = "Visualization Symposium (PacificVis), 2015 IEEE Pacific", pages = "247--254", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/lemuzic_2015_timelapse/", } @article{waldner-2014-af, title = " Attractive Flicker: Guiding Attention in Dynamic Narrative Visualizations", author = "Manuela Waldner and Mathieu Le Muzic and Matthias Bernhard and Werner Purgathofer and Ivan Viola", year = "2014", abstract = "Focus+context techniques provide visual guidance in visualizations by giving strong visual prominence to elements of interest while the context is suppressed. However, finding a visual feature to enhance for the focus to pop out from its context in a large dynamic scene, while leading to minimal visual deformation and subjective disturbance, is challenging. This paper proposes Attractive Flicker, a novel technique for visual guidance in dynamic narrative visualizations. We first show that flicker is a strong visual attractor in the entire visual field, without distorting, suppressing, or adding any scene elements. The novel aspect of our Attractive Flicker technique is that it consists of two signal stages: The first “orientation stage” is a short but intensive flicker stimulus to attract the attention to elements of interest. Subsequently, the intensive flicker is reduced to a minimally disturbing luminance oscillation (“engagement stage”) as visual support to keep track of the focus elements. To find a good trade-off between attraction effectiveness and subjective annoyance caused by flicker, we conducted two perceptual studies to find suitable signal parameters. 
We showcase Attractive Flicker with the parameters obtained from the perceptual statistics in a study of molecular interactions. With Attractive Flicker, users were able to easily follow the narrative of the visualization on a large display, while the flickering of focus elements was not disturbing when observing the context.", month = dec, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "20", number = "12", pages = "2456--2465", keywords = "Narrative Visualization, Flicker, Visual Attention", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-af/", } @inproceedings{waldner-2014-ghi, title = "Graphical Histories of Information Foraging", author = "Manuela Waldner and Stefan Bruckner and Ivan Viola", year = "2014", abstract = "During information foraging, knowledge workers iteratively seek, filter, read, and extract information. When using multiple information sources and different applications for information processing, re-examination of activities for validation of previous decisions or re-discovery of previously used information sources is challenging. In this paper, we present a novel representation of cross-application histories to support recall of past operations and re-discovery of information resources. Our graphical history consists of a cross-scale visualization combining an overview node-link diagram of used desktop resources with nested (animated) snapshot sequences, based on a recording of the visual screen output during the users’ desktop work. This representation makes key elements of the users’ tasks visually stand out, while exploiting the power of visual memory to recover subtle details of their activities. 
In a preliminary study, users found our graphical history helpful to recall details of an information foraging task and commented positively on the ability to expand overview nodes into snapshot and video sequences.", month = oct, isbn = "978-1-4503-2542-4", publisher = "ACM", organization = "NordiCHI’14 - Nordic Conference on Human-Computer Interaction", location = "Helsinki, Finland", booktitle = "Proceedings of the 8th Nordic Conference on Human-Computer Interaction: Fun, Fast, Foundational ", pages = "295--304", keywords = "Graph visualization, Interaction history, Provenance", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-ghi/", } @misc{lemuzic_2014_ipv, title = "Illustrative Visualization of Biochemical Processes Featuring Multiple Temporal Scales", author = "Mathieu Le Muzic and Julius Parulek and Manuela Waldner and Ivan Viola", year = "2014", abstract = "Scientific illustrators are commonly using structural description of molecular compounds when depicting complex biochemical processes. However, computational biology also provides procedural models describing the function of biological processes which are not currently used in the production pipeline. Instead, animators utilize scientific knowledge to manually animate and reproduce the functioning of cellular biology. We would like to explore the use of such models in order to generate explanatory illustrations that would show how molecular machinery works. Particle-based simulations provide the means for spatially representing the dynamics of biochemical processes. They compute the positions of each single particle and are supposed to mimic a realistic behaviour of the metabolites. Current mesoscale visualization also allows to directly show the results of such simulations by mapping the positions of particles in a virtual 3D environment. 
Nevertheless, some biochemical processes, like the DNA repair for instance, exhibit temporal multiscale aspects because they comprise diffusion rates which are much greater in comparison with reaction rates. As a result, it is challenging to produce a clear and coherent visualization out of this type of simulation. Indeed, when viewing the process at the pace which would let us see the reactions, it becomes impossible for the human eye to keep track of individual elements because of the very large diffusion displacements. On the other hand, if one would playback the simulation slow enough to see a steady motion of individual elements, then only a very small number of reactions would occur in a reasonable amount of time. In this work we propose to solve the problem associated with multiple temporal scales by providing means for spatial trajectory smoothing. With this approach we aim at showing the two different temporal scales at the same time by using an advanced trajectory smoothing mechanism. This would allow us to see individual elements while showing a world full of reactions, hence enabling us to communicate complex biological processes and molecular machineries in a comprehensive way. ", event = "Eurographics Workshop on Visual Computing for Biology", note = "Poster presented at Eurographics Workshop on Visual Computing for Biology (2014-09-04--2014-09-05)", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/lemuzic_2014_ipv/", } @inproceedings{waldner-2013-ubiWM, title = "Towards Ubiquitous Information Space Management", author = "Manuela Waldner and Dieter Schmalstieg", year = "2013", abstract = "Large, high-resolution display spaces are usually created by carefully aligning multiple monitors or projectors to obtain a perfectly flat, rectangular display. In this paper, we suggest the usage of imperfect surfaces as extension of personal workspaces to create ubiquitous, personalized information spaces. 
We identify five environmental factors ubiquitous information spaces need to consider: 1) user location and display visibility, 2) display gaps and holes, 3) corners and non-planarity of the display surface, 4) physical objects within and around the display surface, and 5) non-rectangular display shapes. Instead of compensating for fragmentations and non-planarity of the information space, we propose a ubiquitous information space manager, adapting interaction and window rendering techniques to the above mentioned factors. We hypothesize that knowledge workers will benefit from such ubiquitous information spaces due to increased exploitation of spatial cognition. ", month = may, isbn = "978-1-4503-1952-2", publisher = "ACM", location = "Paris, France", booktitle = "POWERWALL: International Workshop on Interactive, Ultra-High-Resolution Displays, part of the SIGCHI Conference on Human Factors in Computing Systems (2013)", pages = "1--6", keywords = "information management, ubiquitous displays", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/waldner-2013-ubiWM/", } @inproceedings{waldner-2013-facetCloudsGI, title = "FacetClouds: Exploring Tag Clouds for Multi-Dimensional Data", author = "Manuela Waldner and Johann Schrammel and Michael Klein and Katrin Kristjansdottir and Dominik Unger and Manfred Tscheligi", year = "2013", abstract = "Tag clouds are simple yet very widespread representations of how often certain words appear in a collection. In conventional tag clouds, only a single visual text variable is actively controlled: the tags’ font size. Previous work has demonstrated that font size is indeed the most influential visual text variable. However, there are other variables, such as text color, font style and tag orientation, that could be manipulated to encode additional data dimensions. FacetClouds manipulate intrinsic visual text variables to encode multiple data dimensions within a single tag cloud. 
We conducted a series of experiments to detect the most appropriate visual text variables for encoding nominal and ordinal values in a cloud with tags of varying font size. Results show that color is the most expressive variable for both data types, and that a combination of tag rotation and background color range leads to the best overall performance when showing multiple data dimensions in a single tag cloud. ", month = may, isbn = "978-1-4822-1680-6 ", publisher = "ACM Publishing House", organization = "ACM Siggraph", location = "Regina, Saskatchewan, Canada", address = "Regina, Saskatchewan, Canada", booktitle = "Proceedings of the 2013 Graphics Interface Conference", pages = "17--24", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/waldner-2013-facetCloudsGI/", } @article{wu-2021, title = "Visualization working group at TU Wien: Visible Facimus Quod Ceteri Non Possunt", author = "Hsiang-Yun Wu and Aleksandr Amirkhanov and Nicolas Grossmann and Tobias Klein and David Kou\v{r}il and Haichao Miao and Laura R. Luidolt and Peter Mindek and Renata Raidou and Ivan Viola and Manuela Waldner and Eduard Gr\"{o}ller", abstract = "Building-up and running a university-based research group is a multi-faceted undertaking. The visualization working group at TU Wien (vis-group) has been internationally active over more than 25 years. The group has been acting in a competitive scientific setting where sometimes contradicting multiple objectives require trade-offs and optimizations. Research-wise the group has been performing basic and applied research in visualization and visual computing. Teaching-wise the group has been involved in undergraduate and graduate lecturing in (medical) visualization and computer graphics. To be scientifically competitive requires to constantly expose the group and its members to a strong international competition at the highest level. 
This necessitates to shield the members against the ensuing pressures and demands and provide (emotional) support and encouragement. Internally, the vis-group has developed a unique professional and social interaction culture: work and celebrate, hard and together. This has crystallized into a nested, recursive, and triangular organization model, which concretizes what it takes to make a research group successful. The key elements are the creative and competent vis-group members who collaboratively strive for (scientific) excellence in a socially enjoyable environment.", year = "2021", doi = "10.1016/j.visinf.2021.02.003", journal = "Visual Informatics", volume = "5", pages = "76--84", URL = "https://www.cg.tuwien.ac.at/research/publications/ongoing/wu-2021/", }