@inproceedings{stappen_SteFAS, title = "Temporally Stable Content-Adaptive and Spatio-Temporal Shading Rate Assignment for Real-Time Applications", author = "Stefan Stappen and Johannes Unterguggenberger and Bernhard Kerbl and Michael Wimmer", year = "2021", abstract = "We propose two novel methods to improve the efficiency and quality of real-time rendering applications: Texel differential-based content-adaptive shading (TDCAS) and spatio-temporally filtered adaptive shading (STeFAS). Utilizing Variable Rate Shading (VRS)-a hardware feature introduced with NVIDIA's Turing micro-architecture-and properties derived during rendering or Temporal Anti-Aliasing (TAA), our techniques adapt the resolution to improve the performance and quality of real-time applications. VRS enables different shading resolution for different regions of the screen during a single render pass. In contrast to other techniques, TDCAS and STeFAS have very little overhead for computing the shading rate. STeFAS enables up to 4x higher rendering resolutions for similar frame rates, or a performance increase of 4× at the same resolution.", month = oct, isbn = "978-3-03868-162-5", publisher = "Eurographics Association", organization = "The Eurographics Association", location = "online", event = "Pacific Graphics 2021", editor = "Lee, Sung-Hee and Zollmann, Stefanie and Okabe, Makoto and W{\"u}nsche, Burkhard", doi = "10.2312/pg.20211391", booktitle = "Pacific Graphics Short Papers, Posters, and Work-in-Progress Papers", pages = "65--66", keywords = "variable rate shading, temporal antialiasing", URL = "https://www.cg.tuwien.ac.at/research/publications/2021/stappen_SteFAS/", } @inproceedings{grossmann-2021-layout, title = "Does the Layout Really Matter? 
A Study on Visual Model Accuracy Estimation", author = "Nicolas Grossmann and J{\"u}rgen Bernard and Michael Sedlmair and Manuela Waldner", year = "2021", abstract = "In visual interactive labeling, users iteratively assign labels to data items until the machine model reaches an acceptable accuracy. A crucial step of this process is to inspect the model's accuracy and decide whether it is necessary to label additional elements. In scenarios with no or very little labeled data, visual inspection of the predictions is required. Similarity-preserving scatterplots created through a dimensionality reduction algorithm are a common visualization that is used in these cases. Previous studies investigated the effects of layout and image complexity on tasks like labeling. However, model evaluation has not been studied systematically. We present the results of an experiment studying the influence of image complexity and visual grouping of images on model accuracy estimation. We found that users outperform traditional automated approaches when estimating a model's accuracy. Furthermore, while the complexity of images impacts the overall performance, the layout of the items in the plot has little to no effect on estimations.", month = oct, publisher = "IEEE Computer Society Press", event = "IEEE Visualization Conference (VIS)", doi = "10.1109/VIS49827.2021.9623326", booktitle = "IEEE Visualization Conference (VIS)", pages = "61--65", URL = "https://www.cg.tuwien.ac.at/research/publications/2021/grossmann-2021-layout/", } @inproceedings{schindler_2020vis, title = "The Anatomical Edutainer", author = "Marwin Schindler and Hsiang-Yun Wu and Renata Raidou", year = "2020", abstract = "Physical visualizations (i.e., data representations by means of physical objects) have been used for many centuries in medical and anatomical education. Recently, 3D printing techniques started also to emerge. 
Still, other medical physicalizations that rely on affordable and easy-to-find materials are limited, while smart strategies that take advantage of the optical properties of our physical world have not been thoroughly investigated. We propose the Anatomical Edutainer, a workflow to guide the easy, accessible, and affordable generation of physicalizations for tangible, interactive anatomical edutainment. The Anatomical Edutainer supports 2D printable and 3D foldable physicalizations that change their visual properties (i.e., hues of the visible spectrum) under colored lenses or colored lights, to reveal distinct anatomical structures through user interaction.", month = oct, event = "IEEE Vis 2020", booktitle = "IEEE Vis Short Papers 2020", pages = "1--5", keywords = "Data Physicalization, Medical Visualization, Anatomical Education", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/schindler_2020vis/", } @article{waldner-2019-rld, title = "A Comparison of Radial and Linear Charts for Visualizing Daily Patterns", author = "Manuela Waldner and Alexandra Diehl and Denis Gracanin and Rainer Splechtna and Claudio Delrieux and Kresimir Matkovic", year = "2019", abstract = "Radial charts are generally considered less effective than linear charts. Perhaps the only exception is in visualizing periodical time-dependent data, which is believed to be naturally supported by the radial layout. It has been demonstrated that the drawbacks of radial charts outweigh the benefits of this natural mapping. Visualization of daily patterns, as a special case, has not been systematically evaluated using radial charts. In contrast to yearly or weekly recurrent trends, the analysis of daily patterns on a radial chart may benefit from our trained skill on reading radial clocks that are ubiquitous in our culture. 
In a crowd-sourced experiment with 92 non-expert users, we evaluated the accuracy, efficiency, and subjective ratings of radial and linear charts for visualizing daily traffic accident patterns. We systematically compared juxtaposed 12-hours variants and single 24-hours variants for both layouts in four low-level tasks and one high-level interpretation task. Our results show that over all tasks, the most elementary 24-hours linear bar chart is most accurate and efficient and is also preferred by the users. This provides strong evidence for the use of linear layouts – even for visualizing periodical daily patterns.", month = oct, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "26", doi = "10.1109/TVCG.2019.2934784", pages = "1033--1042", keywords = "radial charts, time series data, daily patterns, crowd-sourced experiment", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/waldner-2019-rld/", } @mastersthesis{plank-2017-sldg, title = "Effective Line Drawing Generation", author = "Pascal Plank", year = "2019", abstract = "Advanced rendering algorithms such as suggestive contours are able to depict objects in the style of line drawings with various levels of detail. How to select an appropriate level of detail is based on visual aesthetics rather than on substantial characteristics like the accuracy of 3D shape perception. The aim of this thesis is to develop a novel approach for effectively generating line drawings in the style of suggestive contours that are optimized for human 3D shape perception while retaining the amount of ink to a minimum. The proposed post-processing meta-heuristic for optimizing line drawings uses empirical thresholds based on probing human shape perception. The heuristic can also be used to optimize line drawings in terms of other visual characteristics, e.g., cognitive load, and for other line drawings styles such as ridges and valleys. 
The optimization routine is based on a conducted perceptual user study using the gauge figure task to collect more than 17, 000 high-quality user estimates of surface normals from suggestive contours renderings. By analyzing these data points, more in-depth understanding of how humans perceive 3D shape from line drawings is gained. Particularly the accuracy of 3D shape perception and shape ambiguity in regards to changing the level of detail and type of object presented is investigated. In addition, the collected data points are used to calculate two pixel-based perceptual characteristics: the optimal size of a local neighborhood area to estimate 3D shape from and the optimal local ink percentage in this area. In the analysis, a neighborhood size of 36 pixels with an optimal ink percentage of 17.3% could be identified. These thresholds are used to optimize suggestive contours renderings in a post-processing stage using a greedy nearest neighbor optimization scheme. The proposed meta-heuristic procedure yields visually convincing results where each pixel value is close to the identified thresholds. In terms of practical application, the optimization scheme can be used in areas where high 3D shape understanding is essential such as furniture manuals or architectural renderings. 
Both the empirical results regarding shape understanding as well as the practical applications of the thesis’s results form the basis to optimize other line drawing methods and to understand better how humans perceive shape from lines.", month = may, pages = "84", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien", keywords = "visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/plank-2017-sldg/", } @inproceedings{Vasylevska_Khrystyna-2019-TEFVR, title = "Towards Eye-Friendly VR: How Bright Should It Be?", author = "Khrystyna Vasylevska and Hyunjin Yoo and Tara Akhavan and Hannes Kaufmann", year = "2019", abstract = "Visual information plays an important part in the perception of the world around us. Recently, head-mounted displays (HMD) came to the consumer market and became a part of the everyday life of thousands of people. Like with the desktop screens or hand-held devices before, the public is concerned with the possible health consequences of the prolonged usage and question the adequacy of the default settings. It has been shown that the brightness and contrast of a display should be adjusted to match the external light to decrease eye strain and other symptoms. Currently, there is a noticeable mismatch in brightness between the screen and dark background of an HMD that might cause eye strain, insomnia, and other unpleasant symptoms. In this paper, we explore the possibility to significantly lower the screen brightness in the HMD and successfully compensate for the loss of the visual information on a dimmed screen. We designed a user study to explore the connection between the screen brightness HMD and task performance, cybersickness, users’ comfort, and preferences. 
We have tested three levels of brightness: the default Full Brightness, the optional Night Mode and a significantly lower brightness with original content and compensated content. Our results suggest that although users still prefer the brighter setting, the HMDs can be successfully used with significantly lower screen brightness, especially if the low screen brightness is compensated", month = mar, publisher = "IEEE", location = "Osaka, Japan", issn = "2642-5246 ", event = "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", doi = "10.1109/VR.2019.8797752", booktitle = "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", pages = "1--9", keywords = "Virtual Reality, User Study, Perception, Head-Mounted Display", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/Vasylevska_Khrystyna-2019-TEFVR/", } @mastersthesis{trautner-2018-imd, title = "Importance-Driven Exploration of Molecular Dynamics Simulations", author = "Thomas Trautner", year = "2018", abstract = "The aim of this thesis is a novel real-time visualization approach for exploring molecular dynamics (MD-)simulations. Through the constantly improving hardware and everincreasing computing power, MD-simulations are more easily available. Additionally, they consist of hundreds, thousands or even millions of individual simulation frames and are getting more and more detailed. The calculation of such simulations is no longer limited by algorithms or hardware, nevertheless it is still not possible to efficiently explore this huge amount of simulation data, as animated 3D visualization, with ordinary and well established visualization tools. Using current software tools, the exploration of such long simulations takes too much time and due to the complexity of large molecular scenes, the visualizations highly suffer from visual clutter. It is therefore very likely that the user will miss important events. 
Therefore, we designed a focus \& context approach for MD-simulations that guides the user to the most relevant temporal and spatial events, and it is no longer necessary to explore the simulation in a linear fashion. Our contribution can be divided into the following four topics: 1. Spatial importance through different levels of detail. Depending on the type of research task, different geometrical representations can be selected for both, focus and context elements. 2. Importance driven visibility management through ghosting, to prevent context elements from occluding focus elements. 3. Temporal importance through adaptive fast-forward. The playback speed of the simulation is thereby dependent on a single or a combination of multiple importance functions. 4. Visual declutter of accumulated frames through motion blur, which additionally illustrates the playback speed-up. Since the very beginning, this work was developed in close cooperation with biochemists from the Loschmidt Laboratories in Brno, Czech Republic. Together, we analyzed different use cases demonstrating the flexibility of our novel focus \& context approach. ", month = oct, pages = "100", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "molecular dynamics simulation, realtime visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/trautner-2018-imd/", } @article{polatsek-2018-stv, title = "Exploring visual attention and saliency modeling for task-based visual analysis", author = "Patrik Polatsek and Manuela Waldner and Ivan Viola and Peter Kapec and Wanda Benesova", year = "2018", abstract = "Memory, visual attention and perception play a critical role in the design of visualizations. The way users observe a visualization is affected by salient stimuli in a scene as well as by domain knowledge, interest, and the task. 
While recent saliency models manage to predict the users’ visual attention in visualizations during exploratory analysis, there is little evidence how much influence bottom-up saliency has on task-based visual analysis. Therefore, we performed an eye-tracking study with 47 users to determine the users’ path of attention when solving three low-level analytical tasks using 30 different charts from the MASSVIS database [1]. We also compared our task-based eye tracking data to the data from the original memorability experiment by Borkin et al. [2]. We found that solving a task leads to more consistent viewing patterns compared to exploratory visual analysis. However, bottom-up saliency of a visualization has negligible influence on users’ fixations and task efficiency when performing a low-level analytical task. Also, the efficiency of visual search for an extreme target data point is barely influenced by the target’s bottom-up saliency. Therefore, we conclude that bottom-up saliency models tailored towards information visualization are not suitable for predicting visual attention when performing task-based visual analysis. 
We discuss potential reasons and suggest extensions to visual attention models to better account for task-based visual analysis.", month = feb, doi = "10.1016/j.cag.2018.01.010", journal = "Computers \& Graphics", number = "2", keywords = "Information visualization, Eye-tracking experiment, Saliency, Visual attention, Low-level analytical tasks", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/polatsek-2018-stv/", } @phdthesis{waldin-2017-thesis, title = "Using and Adapting to Limits of Human Perception in Visualization", author = "Nicholas Waldin", year = "2017", month = nov, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/waldin-2017-thesis/", } @article{Waldin_Nicholas_2017_FlickerObserver, title = "Flicker Observer Effect: Guiding Attention Through High Frequency Flicker in Images", author = "Nicholas Waldin and Manuela Waldner and Ivan Viola", year = "2017", abstract = "Drawing the user's gaze to an important item in an image or a graphical user interface is a common challenge. Usually, some form of highlighting is used, such as a clearly distinct color or a border around the item. Flicker can also be very salient, but is often perceived as annoying. In this paper, we explore high frequency flicker (60 to 72 Hz) to guide the user's attention in an image. At such high frequencies, the critical flicker frequency (CFF) threshold is reached, which makes the flicker appear to fuse into a stable signal. However, the CFF is not uniform across the visual field, but is higher in the peripheral vision at normal lighting conditions. Through experiments, we show that high frequency flicker can be easily detected by observers in the peripheral vision, but the signal is hardly visible in the foveal vision when users directly look at the flickering patch. 
We demonstrate that this property can be used to draw the user's attention to important image regions using a standard high refresh-rate computer monitor with minimal visible modifications to the image. In an uncalibrated visual search task, users could in a crowded image easily spot the specified search targets flickering with very high frequency. They also reported that high frequency flicker was distracting when they had to attend to another region, while it was hardly noticeable when looking at the flickering region itself.", month = may, journal = "Computer Graphics Forum", volume = "36", number = "2", pages = "467--476", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/Waldin_Nicholas_2017_FlickerObserver/", } @inproceedings{waldner-2017-vph, title = "Exploring Visual Prominence of Multi-Channel Highlighting in Visualizations", author = "Manuela Waldner and Alexey Karimov and Eduard Gr\"{o}ller", year = "2017", abstract = "Visualizations make rich use of multiple visual channels so that there are few resources left to make selected focus elements visually distinct from their surrounding context. A large variety of highlighting techniques for visualizations has been presented in the past, but there has been little systematic evaluation of the design space of highlighting. We explore highlighting from the perspective of visual marks and channels – the basic building blocks of visualizations that are directly controlled by visualization designers. We present the results from two experiments, exploring the visual prominence of highlighted marks in scatterplots: First, using luminance as a single highlight channel, we found that visual prominence is mainly determined by the luminance difference between the focus mark and the brightest context mark. The brightness differences between context marks and the overall brightness level have negligible influence. 
Second, multi-channel highlighting using luminance and blur leads to a good trade-off between highlight effectiveness and aesthetics. From the results, we derive a simple highlight model to balance highlighting across multiple visual channels and focus and context marks, respectively.", month = may, booktitle = "Spring Conference on Computer Graphics 2017", keywords = "information visualization, highlighting, focus+context, visual prominence", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/waldner-2017-vph/", } @bachelorsthesis{Koszticsak-2017-ewt, title = "Generating Expressive Window Thumbnails through Seam Carving", author = "Rebeka Koszticsak", year = "2017", abstract = "Thumbnails are used to display lists of open windows or tabs when switching between them on computers and on mobile devices. These images make it easier to recognize the opened applications, and help to find the needed window quicker. Thumbnails however only display a screenshot of the windows, so they get potentially confusing if there are more opened windows or if the same application is opened multiple times. Depending on the resolution of the display, the screenshot size decreases as the number of opened windows increases. Furthermore, within the same application (like MS Office World) the screenshots are similar in appearance (e.g. : white paper and tool bar), but the important text is not readable. There are several approaches that filter the important areas of the images to enhance the main region. In this bachelor thesis an application is implemented that uses the above methods on screenshots. Screenshots of windows are reduced by cropping the irrelevant elements of the margin area using seam carving, i.e. by eliminating the non-important pixel paths; and by common down-sampling. 
As a result the thumbnails show only relevant information, which makes them more expressive and easier to fulfill their purpose.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/Koszticsak-2017-ewt/", } @article{Groeller_2016_P7, title = "Depth functions as a quality measure and for steering multidimensional projections", author = "Douglas Cedrim and Viktor Vad and Afonso Paiva and Eduard Gr{\"o}ller and Luis Gustavo Nonato and Antonio Castelo", year = "2016", abstract = "The analysis of multidimensional data has been a topic of continuous research for many years. This type of data can be found in several different areas of science. A common task while analyzing such data is to investigate patterns by interacting with spatializations of the data in a visual domain. Understanding the relation between the underlying dataset characteristics and the technique used to provide its visual representation is of fundamental importance since it can provide a better intuition on what to expect from the spatialization. In this paper, we propose the usage of concepts from non-parametric statistics, namely depth functions, as a quality measure for spatializations. We evaluate the action of multi-dimensional projection techniques on such estimates. We apply both qualitative and quantitative analyses on four different multidimensional techniques selected according to the properties they aim to preserve. We evaluate them with datasets of different characteristics: synthetic, real world, high dimensional; and contaminated with outliers. 
As a straightforward application, we propose to use depth information to guide multidimensional projection techniques which rely on interaction through control point selection and positioning. Even for techniques which do not intend to preserve any centrality measure, interesting results can be achieved by separating regions possibly contaminated with outliers. ", month = nov, journal = "Computers \& Graphics (Special Section on SIBGRAPI 2016)", volume = "60", doi = "10.1016/j.cag.2016.08.008", pages = "93--106", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Groeller_2016_P7/", } @article{bernhard-2016-gft, title = "The Accuracy of Gauge-Figure Tasks in Monoscopic and Stereo Displays", author = "Matthias Bernhard and Manuela Waldner and Pascal Plank and Veronika Solteszova and Ivan Viola", year = "2016", abstract = "The gauge-figure task (GFT) is a widespread method used to study surface perception for evaluating rendering and visualization techniques. The authors investigate how accurately slant angles probed on well-defined objects align with the ground truth (GT) in monoscopic and stereoscopic displays. Their results show that the GFT probes taken with well-defined objects align well with the GT in the all-monoscopic and all-stereoscopic conditions. However, they found that a GF rendered in stereo over a monoscopic stimulus results in a strong slant underestimation and that an overestimation occurred in the inverse case (monoscopic GF and stereoscopic stimulus). 
They discuss how their findings affect the interpretation of absolute GFT measures, compared to the GT normal.", month = jul, journal = "IEEE Computer Graphics and Applications", number = "4", volume = "36", pages = "56--66", keywords = "computer graphics, gauge-figure task, perceptual visualization, shape perception", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/bernhard-2016-gft/", } @habilthesis{viola-evr, title = "Effective Visual Representations", author = "Ivan Viola", year = "2016", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/viola-evr/", } @article{Waldin_Nicholas_2016_Colormaps, title = "Personalized 2D color maps", author = "Nicholas Waldin and Matthias Bernhard and Ivan Viola", year = "2016", abstract = "2D color maps are often used to visually encode complex data characteristics such as heat or height. The comprehension of color maps in visualization is affected by the display (e.g., a monitor) and the perceptual abilities of the viewer. In this paper we present a novel method to measure a user's ability to distinguish colors of a two-dimensional color map on a given monitor. We show how to adapt the color map to the user and display to optimally compensate for the measured deficiencies. Furthermore, we improve user acceptance of the calibration procedure by transforming the calibration into a game. The user has to sort colors along a line in a 3D color space in a competitive fashion. 
The errors the user makes in sorting these lines are used to adapt the color map to his perceptual capabilities.", issn = "0097-8493", journal = "Computers \& Graphics", volume = "59", pages = "143--150", keywords = "Color, Perception, Color vision deficiency", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Waldin_Nicholas_2016_Colormaps/", } @inproceedings{Waldin_Nicholas_2016_Individualization, title = "Individualization of 2D Color Maps for People with Color Vision Deficiencies", author = "Nicholas Waldin and Matthias Bernhard and Peter Rautek and Ivan Viola", year = "2016", location = "Smolenice, Slovakia", booktitle = "Proceedings of the 32nd Spring Conference on Computer Graphics", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Waldin_Nicholas_2016_Individualization/", } @article{waldner-2014-af, title = "Attractive Flicker: Guiding Attention in Dynamic Narrative Visualizations", author = "Manuela Waldner and Mathieu Le Muzic and Matthias Bernhard and Werner Purgathofer and Ivan Viola", year = "2014", abstract = "Focus+context techniques provide visual guidance in visualizations by giving strong visual prominence to elements of interest while the context is suppressed. However, finding a visual feature to enhance for the focus to pop out from its context in a large dynamic scene, while leading to minimal visual deformation and subjective disturbance, is challenging. This paper proposes Attractive Flicker, a novel technique for visual guidance in dynamic narrative visualizations. We first show that flicker is a strong visual attractor in the entire visual field, without distorting, suppressing, or adding any scene elements. The novel aspect of our Attractive Flicker technique is that it consists of two signal stages: The first “orientation stage” is a short but intensive flicker stimulus to attract the attention to elements of interest. 
Subsequently, the intensive flicker is reduced to a minimally disturbing luminance oscillation (“engagement stage”) as visual support to keep track of the focus elements. To find a good trade-off between attraction effectiveness and subjective annoyance caused by flicker, we conducted two perceptual studies to find suitable signal parameters. We showcase Attractive Flicker with the parameters obtained from the perceptual statistics in a study of molecular interactions. With Attractive Flicker, users were able to easily follow the narrative of the visualization on a large display, while the flickering of focus elements was not disturbing when observing the context.", month = dec, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "20", number = "12", pages = "2456--2465", keywords = "Narrative Visualization, Flicker, Visual Attention", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-af/", } @article{birkeland_aasmund_2014_pums, title = "Perceptually Uniform Motion Space", author = "{\AA}smund Birkeland and Cagatay Turkay and Ivan Viola", year = "2014", abstract = "Flow data is often visualized by animated particles inserted into a flow field. The velocity of a particle on the screen is typically linearly scaled by the velocities in the data. However, the perception of velocity magnitude in animated particles is not necessarily linear. We present a study on how different parameters affect relative motion perception. We have investigated the impact of four parameters. The parameters consist of speed multiplier, direction, contrast type and the global velocity scale. In addition, we investigated if multiple motion cues, and point distribution, affect the speed estimation. Several studies were executed to investigate the impact of each parameter. In the initial results, we noticed trends in scale and multiplier. 
Using the trends for the significant parameters, we designed a compensation model, which adjusts the particle speed to compensate for the effect of the parameters. We then performed a second study to investigate the performance of the compensation model. From the second study we detected a constant estimation error, which we adjusted for in the last study. In addition, we connect our work to established theories in psychophysics by comparing our model to a model based on Stevens’ Power Law.", month = nov, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "20", number = "11", issn = "1077-2626", pages = "1542--1554", keywords = "motion visualization, motion perception, animation, evaluation, perceptual model", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/birkeland_aasmund_2014_pums/", } @inproceedings{waldner-2013-facetCloudsGI, title = "FacetClouds: Exploring Tag Clouds for Multi-Dimensional Data", author = "Manuela Waldner and Johann Schrammel and Michael Klein and Katrin Kristjansdottir and Dominik Unger and Manfred Tscheligi", year = "2013", abstract = "Tag clouds are simple yet very widespread representations of how often certain words appear in a collection. In conventional tag clouds, only a single visual text variable is actively controlled: the tags’ font size. Previous work has demonstrated that font size is indeed the most influential visual text variable. However, there are other variables, such as text color, font style and tag orientation, that could be manipulated to encode additional data dimensions. FacetClouds manipulate intrinsic visual text variables to encode multiple data dimensions within a single tag cloud. We conducted a series of experiments to detect the most appropriate visual text variables for encoding nominal and ordinal values in a cloud with tags of varying font size. 
Results show that color is the most expressive variable for both data types, and that a combination of tag rotation and background color range leads to the best overall performance when showing multiple data dimensions in a single tag cloud. ", month = may, isbn = "978-1-4822-1680-6 ", publisher = "ACM Publishing House", organization = "ACM Siggraph", location = "Regina, Saskatchewan, Canada", address = "Regina, Saskatchewan, Canada", booktitle = "Proceedings of the 2013 Graphics Interface Conference", pages = "17--24", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/waldner-2013-facetCloudsGI/", }