@article{mindek-2014-mcs, title = "Managing Spatial Selections with Contextual Snapshots", author = "Peter Mindek and Eduard Gr\"{o}ller and Stefan Bruckner", year = "2014", abstract = "Spatial selections are a ubiquitous concept in visualization. By localizing particular features, they can be analyzed and compared in different views. However, the semantics of such selections often depend on specific parameter settings and it can be difficult to reconstruct them without additional information. In this paper, we present the concept of contextual snapshots as an effective means for managing spatial selections in visualized data. The selections are automatically associated with the context in which they have been created. Contextual snapshots can also be used as the basis for interactive integrated and linked views, which enable in-place investigation and comparison of multiple visual representations of data. Our approach is implemented as a flexible toolkit with well-defined interfaces for integration into existing systems. We demonstrate the power and generality of our techniques by applying them to several distinct scenarios such as the visualization of simulation data, the analysis of historical documents, and the display of anatomical data.", month = dec, issn = "1467-8659", journal = "Computer Graphics Forum", number = "8", volume = "33", pages = "132--144", keywords = "annotations, spatial selections, visual analytics, interaction", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/mindek-2014-mcs/", } @article{waldner-2014-af, title = " Attractive Flicker: Guiding Attention in Dynamic Narrative Visualizations", author = "Manuela Waldner and Mathieu Le Muzic and Matthias Bernhard and Werner Purgathofer and Ivan Viola", year = "2014", abstract = "Focus+context techniques provide visual guidance in visualizations by giving strong visual prominence to elements of interest while the context is suppressed. 
However, finding a visual feature to enhance for the focus to pop out from its context in a large dynamic scene, while leading to minimal visual deformation and subjective disturbance, is challenging. This paper proposes Attractive Flicker, a novel technique for visual guidance in dynamic narrative visualizations. We first show that flicker is a strong visual attractor in the entire visual field, without distorting, suppressing, or adding any scene elements. The novel aspect of our Attractive Flicker technique is that it consists of two signal stages: The first “orientation stage” is a short but intensive flicker stimulus to attract the attention to elements of interest. Subsequently, the intensive flicker is reduced to a minimally disturbing luminance oscillation (“engagement stage”) as visual support to keep track of the focus elements. To find a good trade-off between attraction effectiveness and subjective annoyance caused by flicker, we conducted two perceptual studies to find suitable signal parameters. We showcase Attractive Flicker with the parameters obtained from the perceptual statistics in a study of molecular interactions. 
With Attractive Flicker, users were able to easily follow the narrative of the visualization on a large display, while the flickering of focus elements was not disturbing when observing the context.", month = dec, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "20", number = "12", pages = "2456--2465", keywords = "Narrative Visualization, Flicker, Visual Attention", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-af/", } @article{beham-2014-cupid, title = "Cupid: Cluster-based Exploration of Geometry Generators with Parallel Coordinates and Radial Trees", author = "Michael Beham and Wolfgang Herzner and Eduard Gr\"{o}ller and Johannes Kehrer", year = "2014", abstract = "Geometry generators are commonly used in video games and evaluation systems for computer vision to create geometric shapes such as terrains, vegetation or airplanes. The parameters of the generator are often sampled automatically which can lead to many similar or unwanted geometric shapes. In this paper, we propose a novel visual exploration approach that combines the abstract parameter space of the geometry generator with the resulting 3D shapes in a composite visualization. Similar geometric shapes are first grouped using hierarchical clustering and then nested within an illustrative parallel coordinates visualization. This helps the user to study the sensitivity of the generator with respect to its parameter space and to identify invalid parameter settings. Starting from a compact overview representation, the user can iteratively drill-down into local shape differences by clicking on the respective clusters. Additionally, a linked radial tree gives an overview of the cluster hierarchy and enables the user to manually split or merge clusters. 
We evaluate our approach by exploring the parameter space of a cup generator and provide feedback from domain experts.", month = dec, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "20", number = "12", issn = "1077-2626", pages = "1693--1702", keywords = "3D shape analysis, radial trees, hierarchical clustering, illustrative parallel coordinates, composite visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/beham-2014-cupid/", } @article{Groeller_2014_RWA, title = "Run Watchers: Automatic Simulation-Based Decision Support in Flood Management", author = "Artem Konev and J\"{u}rgen Waser and Bernhard Sadransky and Daniel Cornel and Rui A. P. Perdigao and Zsolt Horvath and Eduard Gr\"{o}ller", year = "2014", abstract = "In this paper, we introduce a simulation-based approach to design protection plans for flood events. Existing solutions require a lot of computation time for an exhaustive search, or demand for a time-consuming expert supervision and steering. We present a faster alternative based on the automated control of multiple parallel simulation runs. Run Watchers are dedicated system components authorized to monitor simulation runs, terminate them, and start new runs originating from existing ones according to domain-specific rules. This approach allows for a more efficient traversal of the search space and overall performance improvements due to a re-use of simulated states and early termination of failed runs. In the course of search, Run Watchers generate large and complex decision trees. We visualize the entire set of decisions made by Run Watchers using interactive, clustered timelines. In addition, we present visualizations to explain the resulting response plans. Run Watchers automatically generate storyboards to convey plan details and to justify the underlying decisions, including those which leave particular buildings unprotected. 
We evaluate our solution with domain experts.", month = dec, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "20", number = "12", issn = "1077-2626", booktitle = "IEEE Transactions on Visualization and Computer Graphics/Proceedings of VAST 2014", publisher = "IEEE", pages = "1873--1882", keywords = "visual evidence, Disaster management, simulation control, storytelling, decision making", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Groeller_2014_RWA/", } @article{Matkovic-2014-ieee, title = "Visual Analytics for Complex Engineering Systems: Hybrid Visual Steering of Simulation Ensembles", author = "Kresimir Matkovic and Denis Gracanin and Rainer Splechtna and M. Jelovic and Benedikt Stehno and Helwig Hauser and Werner Purgathofer", year = "2014", abstract = "In this paper we propose a novel approach to hybrid visual steering of simulation ensembles. A simulation ensemble is a collection of simulation runs of the same simulation model using different sets of control parameters. Complex engineering systems have very large parameter spaces so a na{\"\i}ve sampling can result in prohibitively large simulation ensembles. Interactive steering of simulation ensembles provides the means to select relevant points in a multi-dimensional parameter space (design of experiment). Interactive steering efficiently reduces the number of simulation runs needed by coupling simulation and visualization and allowing a user to request new simulations on the fly. As system complexity grows, a pure interactive solution is not always sufficient. The new approach of hybrid steering combines interactive visual steering with automatic optimization. 
Hybrid steering allows a domain expert to interactively (in a visualization) select data points in an iterative manner, approximate the values in a continuous region of the simulation space (by regression) and automatically find the “best” points in this continuous region based on the specified constraints and objectives (by optimization). We argue that with the full spectrum of optimization options, the steering process can be improved substantially. We describe an integrated system consisting of a simulation, a visualization, and an optimization component. We also describe typical tasks and propose an interactive analysis workflow for complex engineering systems. We demonstrate our approach on a case study from automotive industry, the optimization of a hydraulic circuit in a high pressure common rail Diesel injection system.", month = dec, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "20", number = "12", issn = "1077-2626", pages = "1803--1812", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Matkovic-2014-ieee/", } @incollection{Purgathofer-2014-PanEu, title = "Requirements on the Staff of an Application Oriented Research Organization", author = "Werner Purgathofer", year = "2014", abstract = "The VRVis Research Center in Vienna is the largest technology transfer institution in the area of Visual Computing in Austria. The requirements of the funding body FFG include the publication of scientific research results in first class peer reviewed media, and the active cooperation with co-funding companies. 
As a consequence the requirements on the staff of VRVis are manifold: they have to communicate with real users, use real data, know about software and hardware, understand the market, do professional documentation, initiate new projects and write funding proposals for these, be part of the scientific community and publish and review papers, manage several projects in parallel and obey strict deadlines for their projects and some more. Such staff is barely available and must be trained on the job.", month = dec, booktitle = "Current Issues of Science and Research in the Global World", chapter = "Proceedings of the International Conference on Current Issues of Science and Research in the Global World, Vienna, Austria; 27--28 May 2014", editor = "Vlasta Kunova and Martin Dolinsky", isbn = "9781138027398", note = "December 15, 2014 by CRC Press", publisher = "CRC Balkema Press, Netherlands", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Purgathofer-2014-PanEu/", } @mastersthesis{Bauer_Johannes_2014_IWB, title = "Integration of Web-Based Information Visualizations into a Scientific Visualization Environment", author = "Johannes Bauer", year = "2014", abstract = "Today's neuro-biological research is often based on brains of the Drosophila Melanogaster, the commonly known fruit fly. To study the function of neuronal circuits scientists often have to compare the neuronal structures of a set of different brains. Their aim is to find out how complex behavior is generated. Therefore the scientists at the Institute of Molecular Pathology (IMP) in Vienna are using a confocal microscope to produce volumetric images of Drosophila brains. Today they have acquired more than 40.000 images and a large amount of additional data. In many cases 3D renderings of these volumetric images are not sufficient to solve certain scientific problems especially when multiple brains have to be considered. Therefore the researchers rely on additional data which is stored in databases. 
The problem here is that the scientists have two different data sources without a connection between them. On the one hand there are the volumetric images and on the other hand there is the additional data which has certain relations to the brains. This thesis proposes a software design concept to establish a connection between 3D renderings of volumetric images and additional data by using information visualizations. Highlighting techniques can be introduced to link volume visualizations of the brains to related data visualized by 2D information visualizations. Therefore the implementation of this design concept gets integrated into an existing scientific visualization environment. To evaluate this concept common neuro-biological use cases are introduced and it is described how the implementation of this design concept supports the work flow of the researchers.", month = dec, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Bauer_Johannes_2014_IWB/", } @article{Groeller_Eduard_2014_TSP, title = "The Spinel Explorer - Interactive Visual Analysis of Spinel Group Minerals", author = " Mar\'{i}a Luj\'{a}n Ganuza and Gabriela Ferracutti and Maria Florencia Gargiulo and Silvia Mabel Castro and Ernesto Bjerg and Eduard Gr\"{o}ller and Kresimir Matkovic", year = "2014", abstract = "Geologists usually deal with rocks that are up to several thousand million years old. They try to reconstruct the tectonic settings where these rocks were formed and the history of events that affected them through the geological time. The spinel group minerals provide useful information regarding the geological environment in which the host rocks were formed. They constitute excellent indicators of geological environments (tectonic settings) and are of invaluable help in the search for mineral deposits of economic interest. 
The current workflow requires the scientists to work with different applications to analyze spinel data. They do use specific diagrams, but these are usually not interactive. The current workflow hinders domain experts to fully exploit the potentials of tediously and expensively collected data. In this paper, we introduce the Spinel Explorer---an interactive visual analysis application for spinel group minerals. The design of the Spinel Explorer and of the newly introduced interactions is a result of a careful study of geologists' tasks. The Spinel Explorer includes most of the diagrams commonly used for analyzing spinel group minerals, including 2D binary plots, ternary plots, and 3D Spinel prism plots. Besides specific plots, conventional information visualization views are also integrated in the Spinel Explorer. All views are interactive and linked. The Spinel Explorer supports conventional statistics commonly used in spinel minerals exploration. The statistics views and different data derivation techniques are fully integrated in the system. Besides the Spinel Explorer as newly proposed interactive exploration system, we also describe the identified analysis tasks, and propose a new workflow. We evaluate the Spinel Explorer using real-life data from two locations in Argentina: the Frontal Cordillera in Central Andes and Patagonia. We describe the new findings of the geologists which would have been much more difficult to achieve using the current workflow only. 
Very positive feedback from geologists confirms the usefulness of the Spinel Explorer.", month = dec, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "20", number = "12", issn = "1077-2626", pages = "1913--1922", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Groeller_Eduard_2014_TSP/", } @phdthesis{Bernhard_Matthias_2014_VAC, title = "Visual Attention in Computer Graphics", author = "Matthias Bernhard", year = "2014", abstract = "This thesis is concerned with gaze analysis methods to study visual attention in interactive 3D computer-graphics applications, such as virtual environments or computer games. Under this scope, research has been carried out in two directions: On the one hand, it was investigated how gaze analysis in three-dimensional virtual environments can be advanced. On the other hand, approaches were explored which improve three-dimensional graphics by taking into account visual attention of a user. To advance gaze analysis in 3D computer graphics applications, two challenges have been addressed: First, inferring the object of attention at a certain point in time from the current output of an eye tracker – a technique which we denote as gaze-to-object mapping –, and second, deriving a statistical model for visual attention - a data structure we denote as importance map - from sequences of gaze samples recorded from many users. While addressing these challenges is a crucial step towards advancing gaze analysis and research on visual attention which employs modern computer graphics, the results may also be used in applications which attempt to perceptually optimize rendering. Thus, the third challenge addressed in this thesis was to explore an example application for attention-aware rendering techniques, where gaze-to-object mapping or importance maps can be employed to determine or predict the object of attention at run time. 
Thus, this thesis concludes with a pilot study on an application that dynamically adjusts the configuration of a stereo 3D display such that the object being attended by the user can be seen most comfortably.", month = dec, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Bernhard_Matthias_2014_VAC/", } @inproceedings{ymca, title = "YMCA - Your Mesh Comparison Application", author = "Johanna Schmidt and Reinhold Preiner and Thomas Auzinger and Michael Wimmer and Eduard Gr\"{o}ller and Stefan Bruckner", year = "2014", abstract = "Polygonal meshes can be created in several different ways. In this paper we focus on the reconstruction of meshes from point clouds, which are sets of points in 3D. Several algorithms that tackle this task already exist, but they have different benefits and drawbacks, which leads to a large number of possible reconstruction results (i.e., meshes). The evaluation of those techniques requires extensive comparisons between different meshes which is up to now done by either placing images of rendered meshes side-by-side, or by encoding differences by heat maps. A major drawback of both approaches is that they do not scale well with the number of meshes. This paper introduces a new comparative visual analysis technique for 3D meshes which enables the simultaneous comparison of several meshes and allows for the interactive exploration of their differences. Our approach gives an overview of the differences of the input meshes in a 2D view. By selecting certain areas of interest, the user can switch to a 3D representation and explore the spatial differences in detail. To inspect local variations, we provide a magic lens tool in 3D. The location and size of the lens provide further information on the variations of the reconstructions in the selected area. 
With our comparative visualization approach, differences between several mesh reconstruction algorithms can be easily localized and inspected.", month = nov, series = "VAST ", publisher = "IEEE Computer Society", note = "http://dx.doi.org/10.1109/VAST.2014.7042491", location = "Paris, France", booktitle = "IEEE Visual Analytics Science and Technology", keywords = "mesh comparison, 3D data exploration, focus+context, comparative visualization, Visual analysis", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/ymca/", } @article{fan-2014-scfl, title = "Structure Completion for Facade Layouts", author = "Lubin Fan and Przemyslaw Musialski and Ligang Liu and Peter Wonka", year = "2014", abstract = "We present a method to complete missing structures in facade layouts. Starting from an abstraction of the partially observed layout as a set of shapes, we can propose one or multiple possible completed layouts. Structure completion with large missing parts is an ill-posed problem. Therefore, we combine two sources of information to derive our solution: the observed shapes and a database of complete layouts. The problem is also very difficult, because shape positions and attributes have to be estimated jointly. Our proposed solution is to break the problem into two components: a statistical model to evaluate layouts and a planning algorithm to generate candidate layouts. 
This ensures that the completed result is consistent with the observation and the layouts in the database.", month = nov, journal = "ACM Transactions on Graphics (ACM SIGGRAPH Asia 2014)", volume = "33", number = "6", pages = "210:1--210:11", keywords = "facade modeling, facade completion, structure completion, urban modeling", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/fan-2014-scfl/", } @bachelorsthesis{MORARU-2014-CPD, title = "Cross-Platform Development Approaches for the Mobile Device Platforms Android and iOS", author = "Oana Moraru", year = "2014", abstract = "This paper provides an introduction to cross-platform programming for mobile devices as well as an overview of different object-oriented programming -based cross-platform development approaches. The presented approaches are described, discussed and evaluated based on their advantages and disadvantages, as well as a practical implementation. The comparison relies on different aspects, including the preparation of the workspace, necessary programs, installations and plug-ins, their price, the supported platforms, the effort to implement, the ease with which the application programming interfaces can be used and much more. Thus the investment of time, money and energy, as well as other difficulties, which might occur while following an approach, constitute important factors which contribute to the results of the evaluation. 
In the end the paper suggests which might be the best and easiest approach, respectively which approaches are more appropriate than others in some certain cases.", month = nov, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "cross-platform development, mobile computing", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/MORARU-2014-CPD/", } @article{Guerrero-2014-TPS, title = "Partial Shape Matching using Transformation Parameter Similarity", author = "Paul Guerrero and Thomas Auzinger and Michael Wimmer and Stefan Jeschke", year = "2014", abstract = "In this paper, we present a method for non-rigid, partial shape matching in vector graphics. Given a user-specified query region in a 2D shape, similar regions are found, even if they are non-linearly distorted. Furthermore, a non-linear mapping is established between the query regions and these matches, which allows the automatic transfer of editing operations such as texturing. This is achieved by a two-step approach. First, point-wise correspondences between the query region and the whole shape are established. The transformation parameters of these correspondences are registered in an appropriate transformation space. For transformations between similar regions, these parameters form surfaces in transformation space, which are extracted in the second step of our method. 
The extracted regions may be related to the query region by a non-rigid transform, enabling non-rigid shape matching.", month = nov, issn = "1467-8659", journal = "Computer Graphics Forum", number = "8", volume = "33", pages = "1--14", keywords = "Shape Matching, Texture Transfer, Non-Rigid, Deformable, Edit Propagation, Partial", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Guerrero-2014-TPS/", } @bachelorsthesis{puenguentzky-2014-ht, title = "HistoryTime: A Chrome history visualization using WebGL", author = "Roman P\"{u}ng\"{u}ntzky", year = "2014", abstract = "Even though modern web browsers offer history functionalities, only few people use it to re-visit previously visited websites. In this thesis we present HistoryTime, a 3D visualization of the Google Chrome browser history. The goal of this project was to visualize the content of a user's web browsing history in an aesthetic way, as well as to increase the general motivation to use it. We developed a 3-dimensional, visually appealing extension for Google Chrome that offers various possibilities, sorting-modes and browsing-functionalities which should make exploring and searching for websites in the history more pleasant to use. The data is retrieved via the Chrome history API exclusively, and visualized in a WebGL environment using the three.js JavaScript 3D library. The prototype of HistoryTime was tested and compared to the standard Google Chrome browser history in the scope of a small user study. 
The results indicated that our extension offered a better usability overall, and also allowed to solve certain tasks that were not possible with the standard history.", month = nov, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/puenguentzky-2014-ht/", } @phdthesis{Guerrero_Paul_2014_EPG, title = "Edit Propagation using Geometric Analogies", author = "Paul Guerrero", year = "2014", abstract = "Modeling complex geometrical shapes, like city scenes or terrains with dense vegetation, is a time-consuming task that cannot be automated trivially. The problem of creating and editing many similar, but not identical models requires specialized methods that understand what makes these objects similar in order to either create new variations of these models from scratch or to propagate edit operations from one object to all similar objects. In this thesis, we present new methods to significantly reduce the effort required to model complex scenes. For 2D scenes containing deformable objects, such as fish or snakes, we present a method to find partial matches between deformed shapes that can be used to transfer localized properties such as texture between matching shapes. Shapes are considered similar if they are related by pointwise correspondences and if neighboring points have correspondences with similar transformation parameters. Unlike previous work, this approach allows us to successfully establish matches between strongly deformed objects, even in the presence of occlusions and sparse or unevenly distributed sets of matching features. For scenes consisting of 2D shape arrangements, such as floor plans, we propose methods to find similar locations in the arrangements, even though the arrangements themselves are dissimilar. 
Edit operations, such as object placements, can be propagated between similar locations. Our approach is based on simple geometric relationships between the location and the shape arrangement, such as the distance of the location to a shape boundary or the direction to the closest shape corner. Two locations are similar if they have many similar relations to their surrounding shape arrangement. To the best of our knowledge, there is no method that explicitly attempts to find similar locations in dissimilar shape arrangements. We demonstrate populating large scenes such as floor plans with hundreds of objects like pieces of furniture, using relatively few edit operations. Additionally, we show that providing several examples of an edit operation helps narrowing down the supposed modeling intention of the user and improves the quality of the edit propagation. A probabilistic model is learned from the examples and used to suggest similar edit operations. Also, extensions are shown that allow application of this method in 3D scenes. Compared to previous approaches that use entire scenes as examples, our method provides more user control and has no need for large databases of example scenes or domain-specific knowledge. We demonstrate generating 3D interior decoration and complex city scenes, including buildings with detailed facades, using only few edit operations.", month = nov, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Guerrero_Paul_2014_EPG/", } @article{birkeland_aasmund_2014_pums, title = "Perceptually Uniform Motion Space", author = "{\AA}smund Birkeland and Cagatay Turkay and Ivan Viola", year = "2014", abstract = "Flow data is often visualized by animated particles inserted into a flow field. 
The velocity of a particle on the screen is typically linearly scaled by the velocities in the data. However, the perception of velocity magnitude in animated particles is not necessarily linear. We present a study on how different parameters affect relative motion perception. We have investigated the impact of four parameters. The parameters consist of speed multiplier, direction, contrast type and the global velocity scale. In addition, we investigated if multiple motion cues, and point distribution, affect the speed estimation. Several studies were executed to investigate the impact of each parameter. In the initial results, we noticed trends in scale and multiplier. Using the trends for the significant parameters, we designed a compensation model, which adjusts the particle speed to compensate for the effect of the parameters. We then performed a second study to investigate the performance of the compensation model. From the second study we detected a constant estimation error, which we adjusted for in the last study. 
In addition, we connect our work to established theories in psychophysics by comparing our model to a model based on Stevens' Power Law.", month = nov, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "20", number = "11", issn = "1077-2626", pages = "1542--1554", keywords = "motion visualization, motion perception, animation, evaluation, perceptual model", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/birkeland_aasmund_2014_pums/", } @talk{zsolnai-ist-invited-2014, title = "Light Transport with a Touch of Fluids", author = "Karoly Zsolnai-Feh\'{e}r", year = "2014", month = oct, event = "IST Austria", keywords = "photorealistic rendering, subsurface scattering, fluid simulation", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/zsolnai-ist-invited-2014/", } @article{Viola_Ivan_IIP, title = "Interactively illustrating polymerization using three-level model fusion", author = "Ivan Koles\'{a}r and Julius Parulek and Ivan Viola and Stefan Bruckner and Anne-Kristin Stavrum and Helwig Hauser", year = "2014", abstract = "Background: Research in cell biology is steadily contributing new knowledge about many aspects of physiological processes, both with respect to the involved molecular structures as well as their related function. Illustrations of the spatio-temporal development of such processes are not only used in biomedical education, but also can serve scientists as an additional platform for in-silico experiments. Results: In this paper, we contribute a new, three-level modeling approach to illustrate physiological processes from the class of polymerization at different time scales. We integrate physical and empirical modeling, according to which approach best suits the different involved levels of detail, and we additionally enable a form of interactive steering, while the process is illustrated. 
We demonstrate the suitability of our approach in the context of several polymerization processes and report from a first evaluation with domain experts. Conclusion: We conclude that our approach provides a new, hybrid modeling approach for illustrating the process of emergence in physiology, embedded in a densely filled environment. Our approach of a complementary fusion of three systems combines the strong points from the different modeling approaches and is capable to bridge different spatial and temporal scales.", month = oct, issn = "1471-2105", journal = "BMC Bioinformatics 2014", number = "345", volume = "15", pages = "1--16", keywords = "Multi-agent modeling, L-system modeling, Biochemical visualization, Visualization of physiology, Polymerization", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Viola_Ivan_IIP/", } @bachelorsthesis{Fuerst_2014_BSc, title = "Control of Newtonian Fluids With Minimum Force Impact Using the Navier-Stokes Equations", author = "Patrick F\"{u}rst", year = "2014", abstract = "This thesis introduces a novel approach to control smoke towards a target density distribution with minimal force impact to reduce unnatural behavior and unconvincing visual results. The first part consists of an introduction to fluid simulations and fluid control followed by an exploration of previous research in the field of Computational fluid dynamics, especially in Computer Graphics. After that, the Navier-Stokes equations are introduced and a short overview on how to solve them is given. The last part describes our approach to controlling smoke based on biased diffusion and long-range force and shows the results of this research. Based on a criterion, which has emerged from the numerical solution of diffusion, the algorithm decides whether to apply forces or use diffusion to distribute the smoke resulting in a great reduction of forces applied to the smoke. 
Results show that the smoke reaches the target density faster and the motion is much less furious, which contributes to more natural results. Our algorithm is implemented in the open source animation software Blender and gives the artist access to smoke control parameters.", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "fluid simulation, fluid control", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Fuerst_2014_BSc/", } @mastersthesis{CORNEL-2014-AFRS, title = "Analysis of Forced Random Sampling", author = "Daniel Cornel", year = "2014", abstract = "Stochastic sampling is an indispensable tool in computer graphics which allows approximating complex functions and integrals in finite time. Applications which rely on stochastic sampling include ray tracing, remeshing, stippling and texture synthesis. In order to cover the sample domain evenly and without regular patterns, the sample distribution has to guarantee spatial uniformity without regularity and is said to have blue-noise properties. Additionally, the samples need to be distributed according to an importance function such that the sample distribution satisfies a given sampling probability density function globally while being well distributed locally. The generation of optimal blue-noise sample distributions is expensive, which is why a lot of effort has been devoted to finding fast approximate blue-noise sampling algorithms. Most of these algorithms, however, are either not applicable in real time or have weak blue-noise properties. Forced Random Sampling is a novel algorithm for real-time importance sampling. Samples are generated by thresholding a precomputed dither matrix with the importance function. By the design of the matrix, the sample points show desirable local distribution properties and are adapted to the given importance. 
In this thesis, an efficient and parallelizable implementation of this algorithm is proposed and analyzed regarding its sample distribution quality and runtime performance. The results are compared to both the qualitative optimum of blue-noise sampling and the state of the art of real-time importance sampling, which is Hierarchical SampleWarping. With this comparison, it is investigated whether Forced Random Sampling is competitive with current sampling algorithms. The analysis of sample distributions includes several discrepancy measures and the sample density to evaluate their spatial properties as well as Fourier and differential domain analyses to evaluate their spectral properties. With these established methods, it is shown that Forced Random Sampling generates samples with approximate blue-noise properties in real time. Compared to the state of the art, the proposed algorithm is able to generate samples of higher quality with less computational effort and is therefore a valid alternative to current importance sampling algorithms. ", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "global illumination, Poisson disk sampling", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/CORNEL-2014-AFRS/", } @bachelorsthesis{WINKLER-2013-AMO, title = "Advanced Modifications of a Basic Rendering Framework", author = "Gernot Winkler", year = "2014", abstract = "In this thesis an overview about my work on the framework for the lab course “Introduction to computer graphics” will be given. At first the current situation is explained and what extensions should be made. It is described how task variations are implemented with an ANT-script. Also some computer graphics algorithms are evaluated on how well they would perform in the framework and it is explained how they are implemented or why not. 
The major topics are Weiler-Atherton clipping and why it is problematic, different methods for backface culling and the Oren-Nayar and Cook-Torrance shading models. The last chapter explains the new transparency feature that has been implemented in detail and why order independent transparency (OIT) has been chosen over BSP-Trees including an extensive comparison.", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/WINKLER-2013-AMO/", } @inproceedings{waldner-2014-ghi, title = "Graphical Histories of Information Foraging", author = "Manuela Waldner and Stefan Bruckner and Ivan Viola", year = "2014", abstract = "During information foraging, knowledge workers iteratively seek, filter, read, and extract information. When using multiple information sources and different applications for information processing, re-examination of activities for validation of previous decisions or re-discovery of previously used information sources is challenging. In this paper, we present a novel representation of cross-application histories to support recall of past operations and re-discovery of information resources. Our graphical history consists of a cross-scale visualization combining an overview node-link diagram of used desktop resources with nested (animated) snapshot sequences, based on a recording of the visual screen output during the users’ desktop work. This representation makes key elements of the users’ tasks visually stand out, while exploiting the power of visual memory to recover subtle details of their activities. 
In a preliminary study, users found our graphical history helpful to recall details of an information foraging task and commented positively on the ability to expand overview nodes into snapshot and video sequences.", month = oct, isbn = "978-1-4503-2542-4", publisher = "ACM", organization = "NordiCHI’14 - Nordic Conference on Human-Computer Interaction", location = "Helsinki, Finland", booktitle = "Proceedings of the 8th Nordic Conference on Human-Computer Interaction: Fun, Fast, Foundational ", pages = "295--304", keywords = "Graph visualization, Interaction history, Provenance", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-ghi/", } @bachelorsthesis{ZAU14, title = "Unity OPC Importer and Renderer", author = "Stefan Zaufl", year = "2014", abstract = "The goal of this bachelor thesis is to implement an OPC importer for the Unity 3D engine. OPCs should be rendered as Unity Terrain Game Objects. In a first approach a simple CPU based tessellation will be implemented to realise LODs. The final version will use tessellation shading. ", month = sep, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Tesselation Shading, Ordered Point Clouds, Unity 3D", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/ZAU14/", } @inproceedings{Viola_Ivan_DAC, title = "Deriving Anatomical Context from 4D Ultrasound", author = "Markus M\"{u}ller and Linn E. S. Helljesen and Raphael Prevost and Ivan Viola and Kim Nylund and Odd Helge Gilja and Nassir Navab and Wolfgang Wein", year = "2014", abstract = "Real-time three-dimensional (also known as 4D) ultrasound imaging using matrix array probes has the potential to create large-volume information of entire organs such as the liver without external tracking hardware. This information can in turn be placed into the context of a CT or MRI scan of the same patient. 
However for such an approach many image processing challenges need to be overcome and sources of error addressed, including reconstruction drift, anatomical deformations, varying appearance of anatomy, and imaging artifacts. In this work, we present a fully automatic system including robust image-based ultrasound tracking, a novel learning-based global initialization of the anatomical context, and joint mono- and multi-modal registration. In an evaluation on 4D US sequences and MRI scans of eight volunteers we achieve automatic reconstruction and registration without any user interaction, assess the registration errors based on physician-defined landmarks, and demonstrate realtime tracking of free-breathing sequences.", month = sep, isbn = "978-3-905674-62-0", publisher = "Eurographics Association", note = "The electronic version of the proceedings is available from the Eurographics Digital Library at http://diglib.eg.org", location = "Vienna, Austria", issn = "2070-5778", event = "4th Eurographics Workshop on Visual Computing for Biology and Medicine", editor = "Ivan Viola and Katja Buehler and Timo Ropinski", booktitle = "Proceedings of EG VCBM14", pages = "173--180", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Viola_Ivan_DAC/", } @incollection{Viola_Ivan_UVP, title = "The Ultrasound Visualization Pipeline", author = "{\AA}smund Birkeland and Veronika Solteszova and Dieter H\"{o}nigmann and Odd Helge Gilja and Svein Brekke and Timo Ropinski and Ivan Viola", year = "2014", abstract = "Radiology is one of the main tools in modern medicine. A numerous set of diseases, ailments and treatments utilize accurate images of the patient. Ultrasound is one of the most frequently used imaging modalities in medicine. The high spatial resolution, its interactive nature and non-invasiveness make it the first choice in many examinations. Image interpretation is one of ultrasound’s main challenges. 
Much training is required to obtain a confident skill level in ultrasound-based diagnostics. State-of-the-art graphics techniques are needed to provide meaningful visualizations of ultrasound in real-time. In this paper we present the process-pipeline for ultrasound visualization, including an overview of the tasks performed in the specific steps. To provide an insight into the trends of ultrasound visualization research, we have selected a set of significant publications and divided them into a technique-based taxonomy covering the topics pre-processing, segmentation, registration, rendering and augmented reality. For the different technique types we discuss the difference between ultrasound-based techniques and techniques for other modalities.", month = sep, address = "http://link.springer.com/chapter/10.1007%2F978-1-4471-6497-5_24", booktitle = "Scientific Visualization", chapter = "Uncertainty, Multifield, Biomedical, and Scalable Visualization", editor = "Charles D. Hansen and Min Chen and Christopher R. Johnson and Arie E. Kaufman and Hans Hagen", isbn = "978-1-4471-6496-8", publisher = "Springer London", series = "Mathematics and Visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Viola_Ivan_UVP/", } @inproceedings{Viola_Ivan_VDP, title = "Visibility-Driven Processing of Streaming Volume Data", author = "Veronika Solteszova and {\AA}smund Birkeland and Ivan Viola and Stefan Bruckner", year = "2014", abstract = "In real-time volume data acquisition, such as 4D ultrasound, the raw data is challenging to visualize directly without additional processing. Noise removal and feature detection are common operations, but many methods are too costly to compute over the whole volume when dealing with live streamed data. In this paper, we propose a visibility-driven processing scheme for handling costly on-the-fly processing of volumetric data in real-time. 
In contrast to the traditional visualization pipeline, our scheme utilizes a fast computation of the potentially visible subset of voxels which significantly reduces the amount of data required to process. As filtering operations modify the data values which may affect their visibility, our method for visibility-mask generation ensures that the set of elements deemed visible does not change after processing. Our approach also exploits the visibility information for the storage of intermediate values when multiple operations are performed in sequence, and can therefore significantly reduce the memory overhead of longer filter pipelines. We provide a thorough technical evaluation of the approach and demonstrate it on several typical scenarios where on-the-fly processing is required.", month = sep, isbn = "978-3-905674-62-0", publisher = "Eurographics Association", location = "Vienna, Austria", issn = "2070-5778", event = "4th EG Workshop on Visual Computing and Biology Medicine", editor = "Ivan Viola and Katja Buehler and Timo Ropinski", booktitle = "Proceedings of EG VCBM 2014", pages = "127--136", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Viola_Ivan_VDP/", } @mastersthesis{steiner-2014-da, title = "Structure-aware shape manipulation", author = "Bernhard Steiner", year = "2014", abstract = "In computer graphics one of the most expensive fields is content creation. While today the rendering part is highly sophisticated and automated, 3d modeling is still a very challenging task due to the steep learning curve and the amount of work time required. Although there are a lot of free model databases available in the internet, this models are in general not perfectly suited to the customers needs and have to be adapted. Structure-aware shape manipulation deals with the complexity of creating variations, or adapting a given input model while retaining global features like symmetry and connectivity. 
The majority of existing algorithms only try to preserve structural features, or have very limited support for adapting the overall structure of the model to changes made by the user. The lack of complex structural changes in the model, for example allowing a wheel to change the number of strokes, limits the number of possible variations this algorithms can generate. The aim of this thesis is to exploit the possibilities of using curves and curve spaces to find a better representation of the input model, allowing for complex structural adaptions. In addition to this the usefulness of a multi-layered graphs to represent the model and it's constraints should be investigated. ", month = sep, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = " structure-aware model manipulation, modelling, cg", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/steiner-2014-da/", } @bachelorsthesis{Gehrer_Daniel_CUI, title = "CellUnity an Interactive Tool for Illustrative Visualization of Molecular Reactions", author = "Daniel Gehrer", year = "2014", abstract = "CellUnity is a tool for interactive visualization of molecular reactions using the Unity game engine. Current mesoscale visualizations commonly utilize the results of particle-based simulations, which account for spatial information of each single particle and are supposed to mimic a realistic behavior of the metabolites. However, this approach employs stochastic simulation methods which do not offer any control over the visualized output. CellUnity, on the other hand, exploits the results of deterministic simulations which are purely quantitative and in that way offering full user control over the spatial locations of the reactions. 
The user is able to trigger reactions on demand instead of having to wait or search for a specific type of reaction event, while the quantities of displayed molecules would still be in accordance with real scientific data. CellUnity exploits the simulation results in real time and allows the user to freely modify simulation parameters while the system is running. The tool was realized in Unity, a cross-platform game engine that also comprises a free version with adequate functionality and therefore enables easy deployment of the project.", month = sep, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Gehrer_Daniel_CUI/", } @article{arikan-2014-pcvis, title = "Large-Scale Point-Cloud Visualization through Localized Textured Surface Reconstruction", author = "Murat Arikan and Reinhold Preiner and Claus Scheiblauer and Stefan Jeschke and Michael Wimmer", year = "2014", abstract = "In this paper, we introduce a novel scene representation for the visualization of large-scale point clouds accompanied by a set of high-resolution photographs. Many real-world applications deal with very densely sampled point-cloud data, which are augmented with photographs that often reveal lighting variations and inaccuracies in registration. Consequently, the high-quality representation of the captured data, i.e., both point clouds and photographs together, is a challenging and time-consuming task. We propose a two-phase approach, in which the first (preprocessing) phase generates multiple overlapping surface patches and handles the problem of seamless texture generation locally for each patch. The second phase stitches these patches at render-time to produce a high-quality visualization of the data. 
As a result of the proposed localization of the global texturing problem, our algorithm is more than an order of magnitude faster than equivalent mesh-based texturing techniques. Furthermore, since our preprocessing phase requires only a minor fraction of the whole dataset at once, we provide maximum flexibility when dealing with growing datasets.", month = sep, issn = "1077-2626", journal = "IEEE Transactions on Visualization \& Computer Graphics", number = "9", volume = "20", pages = "1280--1292", keywords = "image-based rendering, large-scale models, color, surface representation", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/arikan-2014-pcvis/", } @misc{fmistelbauer-2014-adict, title = "ActiveDICOM - Enhancing Static Medical Images with Interaction", author = "Florian Mistelbauer and Gabriel Mistelbauer and Eduard Gr\"{o}ller", year = "2014", abstract = "Digital Imaging and Communications in Medicine (DICOM) is a well-established standard in medical imaging, consisting not only of image data, but sensitive data such as patient and examination information. Nowadays, although having a large variety of advanced rendering techniques available, DICOM images are still generated and sent to the Picture Archiving and Communication System (PACS). These images are then fetched by the medical doctor from a workstation and used for medical reporting. The user has no other possibilities than being able to change the windowing function for displaying the DICOM images. If a certain region is of special interest, either images of the whole data set are generated or have to be specifically requested. Both approaches consume a considerable amount of time. Secondly, the image generation on demand remains pending until done by the responsible assistant. Despite supporting a broad range of features and being widely applied, DICOM images remain static. 
We propose a visualization mapping language, Active DICOM Script (ADICT), which enhances conventional DICOM with interactive elements by combining heterogeneous data, interaction and visualization. Such DICOM images are then called Active Digital Imaging and Communications in Medicine (ActiveDICOM).", month = sep, series = "EG VCBM 2014", location = "Vienna, Austria", event = "Eurographics Workshop on Visual Computing for Biology and Medicine", booktitle = "Posters at Eurographics Workshop on Visual Computing for Biology and Medicine", note = "Poster presented at Eurographics Workshop on Visual Computing for Biology and Medicine (2014-09-03--2014-09-05)", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/fmistelbauer-2014-adict/", } @mastersthesis{Langer_Edith_IR1, title = "Image Retrieval on Co-registered Confocal Microscopy Image Collections", author = "Edith Langer", year = "2014", abstract = "Biologists at the Institute of Molecular Pathology (IMP) in Vienna scan brains of the species Drosophila melanogaster with a confocal microscope to find relations between genes, brain structure and behavior. The database contains now more than 40.000 volumetric images, which makes it time-consuming to search for an image of interest. For biologists it would be very helpful to have a method which can be used to search for specific images and works on the perceptual level of content. The aim of this thesis is to develop a Content Based Image Retrieval (CBIR) method customized for 3D fly brain images. A biologist can choose an image which shows interesting gene expressions and as result images which are visually similar should be retrieved. Exhaustive literature research shows that in the biological field nothing comparable exists. However, CBIR plays an important role in the medical domain, which deals also with 3D images and therefore publications in this area can be seen as related. 
The voxelwise comparison of two images would be on the one hand computationally expensive and on the other hand not practicable due to image registration errors and anatomical variations of neuronal structures. Creating maximum intensity projections from three directions and applying a principal component analysis on the gray values overcomes the before mentioned drawbacks and delivers satisfying results. The fly brain can be divided into regions, so-called neuropils. The proposed method works on the basis of neuropils. This has, among others, the advantage that not only a global similarity can be computed, but also a comparison of images based on only some of the neuropils is possible. An extensive evaluation of the developed method is given including a parameter space exploration. For example, different lengths of the feature vectors, which describe a fly brain in a lower dimensional space, are tried and also different distance measures are tested. The evaluation shows satisfying results and that the method facilitates the work of biologists when they are looking for similar images to create a hypothesis about the connection of genes and behavior. ", month = aug, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Langer_Edith_IR1/", } @article{preiner2014clop, title = "Continuous Projection for Fast L1 Reconstruction", author = "Reinhold Preiner and Oliver Mattausch and Murat Arikan and Renato Pajarola and Michael Wimmer", year = "2014", abstract = "With better and faster acquisition devices comes a demand for fast robust reconstruction algorithms, but no L1-based technique has been fast enough for online use so far. In this paper, we present a novel continuous formulation of the weighted locally optimal projection (WLOP) operator based on a Gaussian mixture describing the input point density. 
Our method is up to 7 times faster than an optimized GPU implementation of WLOP, and achieves interactive frame rates for moderately sized point clouds. We give a comprehensive quality analysis showing that our continuous operator achieves a generally higher reconstruction quality than its discrete counterpart. Additionally, we show how to apply our continuous formulation to spherical mixtures of normal directions, to also achieve a fast robust normal reconstruction. Project Page: https://www.cg.tuwien.ac.at/~preiner/projects/clop/", month = aug, journal = "ACM Transactions on Graphics (Proc. of ACM SIGGRAPH 2014)", volume = "33", number = "4", issn = "0730-0301", doi = "10.1145/2601097.2601172", pages = "47:1--47:13", keywords = "point set, Gaussian mixture, Hierarchical EM, upsampling, dynamic reconstruction, L1 reconstruction", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/preiner2014clop/", } @article{bernhard-2014-GTOM, title = "Gaze-To-Object Mapping During Visual Search in 3D Virtual Environments ", author = "Matthias Bernhard and Efstathios Stavrakis and Michael Hecher and Michael Wimmer", year = "2014", abstract = "Stimuli obtained from highly dynamic 3D virtual environments and synchronous eye-tracking data are commonly used by algorithms that strive to correlate gaze to scene objects, a process referred to as Gaze-To-Object Mapping (GTOM). We propose to address this problem with a probabilistic approach using Bayesian inference. The desired result of the inference is a predicted probability density function (PDF) specifying for each object in the scene a probability to be attended by the user. To evaluate the quality of a predicted attention PDF, we present a methodology to assess the information value (i.e., likelihood) in the predictions of different approaches that can be used to infer object attention. 
To this end, we propose an experiment based on a visual search task which allows us to determine the object of attention at a certain point in time under controlled conditions. We perform this experiment with a wide range of static and dynamic visual scenes to obtain a ground-truth evaluation data set, allowing us to assess GTOM techniques in a set of 30 particularly challenging cases.", month = aug, journal = "ACM Transactions on Applied Perception (Special Issue SAP 2014)", volume = "11", number = "3", issn = "1544-3558", pages = "14:1--14:17", keywords = "object-based attention, eye-tracking, virtual environments, visual attention", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/bernhard-2014-GTOM/", } @bachelorsthesis{Trautner_Thomas_2014_VAE, title = "Visualizing Archaeological Excavations based on Unity3D", author = "Thomas Trautner", year = "2014", abstract = "As part of an archaeological excavation huge amounts of different types of data, for example laser scan point-clouds, triangulated surface meshes, pictures or drawings of finds, find attributes like location, age, condition and description or layers of excavated earth are collected. This detailed documentation is important to give archaeologists the possibility to analyze the collected data at a later date since the find spot might not be accessible anymore. Unfortunately all the accumulated data is separately saved and consequently complex to explore. Therefore we present a novel solution that allows the user to digitally explore a virtual archaeological excavation in real-time. 
With our approach we can not only visualize different types of textured meshes and finds but allow the user to draw on surfaces to mark areas of certain interest that need further exploration, enable explosion views to investigate composition of different layers of earth and arbitrary slicing of the three-dimensional mesh structure to better visualize cross-sections and an easier tracing of accumulation points of finds. The result of this work is a new powerful tool that will support the analysis of future excavations. All results and the implementation itself will be presented as part of this thesis.", month = jul, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Trautner_Thomas_2014_VAE/", } @inproceedings{kolesar-ivan-2014-polymers, title = "Illustrating Polymerization using Three-level Model Fusion", author = "Ivan Koles\'{a}r and Julius Parulek and Ivan Viola and Stefan Bruckner and Anne-Kristin Stavrum and Helwig Hauser", year = "2014", abstract = "Research in cell biology is steadily contributing new knowledge about many different aspects of physiological processes like polymerization, both with respect to the involved molecular structures as well as their related function. Illustrations of the spatio-temporal development of such processes are not only used in biomedical education, but also can serve scientists as an additional platform for in-silico experiments. In this paper, we contribute a new, three-level modeling approach to illustrate physiological processes from the class of polymerization at different time scales. We integrate physical and empirical modeling, according to which approach suits the different involved levels of detail best, and we additionally enable a simple form of interactive steering while the process is illustrated. 
We demonstrate the suitability of our approach in the context of several polymerization processes and report from a first evaluation with domain experts.", month = jul, publisher = "IEEE Digital Library", organization = "4th Symposium on Biological Data Visualization (in Conjunction with the International Conference on Intelligent Systems for Molecular Biology (ISMB 2014)) ", location = "Boston, USA", booktitle = "Proceedings of IEEE BioVis 2014", pages = "1--22", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/kolesar-ivan-2014-polymers/", } @mastersthesis{Moellinger_Christian_IDE2, title = "Interactive Data Editing of Time-Dependent Data in Visual Analysis", author = "Christian M\"{o}llinger", year = "2014", abstract = "In the so-called information age, data is widely available. Sources include data collection as a byproduct (e.g., log files on a server, or as a more concrete example, movement profiles of smartphone users) and data generation for particular purposes (e.g., simulation runs, data gathered from sensors). To benefit from this huge amount of available data, the data must be analyzed and relevant information must be extracted. Visual Analytics has become an important approach to identify and extract relevant information from data, especially with big data sets. However, data can also contain erroneous values for different reasons, e.g., caused by defective sensors. In data warehousing projects, transforming and editing the data into a usable state can account for up to 80\% of the cost and the development time. This diploma thesis focuses on time-dependent data and presents an extension for the existing visual analytics framework VISPLORE, to support the user in the process of data editing. Using plausibility rules, the user can define data checks and imputation strategies. 
Three different overviews, a data-based overview, a group-based overview, and a rule-based overview provide insight into the structure of implausible data values and the defined plausibility rules. Implausible values can be imputed using the defined imputation strategies and existing visualization techniques are extended to enable the user getting an overview of the modified values. Real-world data is used to demonstrate two use-cases. Limitations of the provided overviews, e.g., scalability for a large number of plausibility rules, are discussed and ideas for future work are outlined. ", month = jun, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Moellinger_Christian_IDE2/", } @article{hecher-2014-MH, title = "A Comparative Perceptual Study of Soft Shadow Algorithms", author = "Michael Hecher and Matthias Bernhard and Oliver Mattausch and Daniel Scherzer and Michael Wimmer", year = "2014", abstract = "We performed a perceptual user study of algorithms that approximate soft shadows in real time. Although a huge body of soft-shadow algorithms have been proposed, to our knowledge this is the first methodical study for comparing different real-time shadow algorithms with respect to their plausibility and visual appearance. We evaluated soft-shadow properties like penumbra overlap with respect to their relevance to shadow perception in a systematic way, and we believe that our results can be useful to guide future shadow approaches in their methods of evaluation. In this study, we also capture the predominant case of an inexperienced user observing shadows without comparing to a reference solution, such as when watching a movie or playing a game. 
One important result of this experiment is to scientifically verify that real-time soft-shadow algorithms, despite having become physically based and very realistic, can nevertheless be intuitively distinguished from a correct solution by untrained users.", month = jun, issn = "1544-3558", journal = "ACM Transactions on Applied Perception", number = "5", volume = "11", pages = "5:1--5:21", keywords = "Perception Studies, Soft Shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/hecher-2014-MH/", } @article{LUKSCH-2014-RTR, title = "Real-Time Rendering of Glossy Materials with Regular Sampling", author = "Christian Luksch and Robert F. Tobler and Thomas M\"{u}hlbacher and Michael Schw\"{a}rzler and Michael Wimmer", year = "2014", abstract = "Rendering view-dependent, glossy surfaces to increase the realism in real-time applications is a computationally complex task, that can only be performed by applying some approximations—especially when immediate changes in the scene in terms of material settings and object placement are a necessity. The use of environment maps is a common approach to this problem, but implicates performance problems due to costly pre-filtering steps or expensive sampling. 
We, therefore, introduce a regular sampling scheme for environment maps that relies on an efficient MIP-map-based filtering step, and minimizes the number of necessary samples for creating a convincing real-time rendering of glossy BRDF materials.", month = jun, journal = "The Visual Computer", volume = "30", number = "6-8", issn = "0178-2789", pages = "717--727", keywords = "real-time rendering , BRDFs", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/LUKSCH-2014-RTR/", } @article{lemuzic-2014-ivm, title = "Illustrative Visualization of Molecular Reactions using Omniscient Intelligence and Passive Agents ", author = "Mathieu Le Muzic and Julius Parulek and Anne-Kristin Stavrum and Ivan Viola", year = "2014", abstract = "In this paper we propose a new type of a particle systems, tailored for illustrative visualization purposes, in particular for visualizing molecular reactions in biological networks. Previous visualizations of biochemical processes were exploiting the results of agent-based modeling. Such modeling aims at reproducing accurately the stochastic nature of molecular interactions. However, it is impossible to expect events of interest happening at a certain time and location, which is impractical for storytelling. To obtain the means of controlling molecular interactions, we propose to govern passive agents with an omniscient intelligence, instead of giving to the agents the freedom of initiating reaction autonomously. This makes it possible to generate illustrative animated stories that communicate the functioning of the molecular machinery. The rendering performance delivers for interactive framerates of massive amounts of data, based on the dynamic tessellation capabilities of modern graphics cards. 
Finally, we report an informal expert feedback we obtained from the potential users.", month = jun, journal = "Computer Graphics Forum", volume = "33", number = "3", note = "Article first published online: 12 JUL 2014", pages = "141--150", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/lemuzic-2014-ivm/", } @article{Groeller_2014_UPS, title = "Guest editorial—Uncertainty and parameter space analysis in visualization", author = "Christoph Heinzl and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2014", abstract = "Within the past decades visualization advanced to a powerful means of exploring and analyzing data. Recent developments in both hard- and software contributed to previously unthinkable evaluations and visualizations of data with strongly increasing sizes and levels of complexity. Providing just insight into available data of a problem seems not to be sufficient anymore: Uncertainty and parameter space analyses in visualization are becoming more prevalent and may be found in astronomic, (bio)-medical, industrial, and engineering applications. The major goal is to find out, at which stage of the pipeline - from data acquisition to the final rendering of the output image - how much uncertainty is introduced and consequently how the desired result (e.g., a dimensional measurement feature) is affected. Therefore effective methods and techniques are required by domain specialists, which help to understand how data is generated, how reliable is the generated data, and where and why data is uncertain. Furthermore, as the problems to investigate are becoming increasingly complex, also finding suitable algorithms providing the desired solution tends to be more difficult. Additional questions may arise, e.g., how does a slight parameter change modify the result, how stable is a parameter, in which range is a parameter stable or which parameter set is optimal for a specific problem. 
Metaphorically speaking, an algorithm for solving a problem may be seen as finding a path through some rugged terrain (the core problem) ranging from the high grounds of theory to the haunted swamps of heuristics. There are many different paths through this terrain with different levels of comfort, length, and stability. Finding all possible paths corresponds in our case to doing an analysis of all possible parameters of a problem solving algorithm, which yields a typically multi-dimensional parameter space. This parameter space allows for an analysis of the quality and stability of a specific parameter set. In many cases of conventional visualization approaches the issues of uncertainty and parameter space analyses are neglected. For a long time, uncertainty - if visualized at all - used to be depicted as blurred data. But in most cases the uncertainty in the base data is not considered at all and just the quantities of interest are calculated. And even to calculate these quantities of interest, too often an empirically found parameter set is used to parameterize the underlying algorithms without exploring its sensitivity to changes and without exploring the whole parameter space to find the global or a local optimum. This tutorial aims to open minds and to look at our data and the parameter sets of our algorithms with a healthy skepticism. In the tutorial we combine uncertainty visualization and parameter space analyses which we believe is essential for the acceptance and applicability of future algorithms and techniques. The tutorial provides six sessions starting with an overview of uncertainty visualization including a historical perspective, uncertainty modeling and statistical visualization. The second part of the tutorial will be dedicated to structural uncertainty, parameter space analysis, industrial applications of uncertainty visualization and an outlook in this domain. 
", month = jun, journal = "Computer & Graphics", volume = "41", pages = "A1--A2", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Groeller_2014_UPS/", } @article{amirkhanov-2014-ama, title = "InSpectr: Multi-Modal Exploration, Visualization, and Analysis of Spectral Data", author = "Artem Amirkhanov and Bernhard Fr\"{o}hler and Johann Kastner and Eduard Gr\"{o}ller and Christoph Heinzl", year = "2014", abstract = "This paper addresses the increasing demand in industry for methods to analyze and visualize multimodal data involving a spectral modality. Two data modalities are used: high-resolution X-ray computed tomography (XCT) for structural characterization and low-resolution X-ray fluorescence (XRF) spectral data for elemental decomposition. We present InSpectr, an integrated tool for the interactive exploration and visual analysis of multimodal, multiscalar data. The tool has been designed around a set of tasks identified by domain experts in the fields of XCT and XRF. It supports registered single scalar and spectral datasets optionally coupled with element maps and reference spectra. InSpectr is instantiating various linked views for the integration of spatial and non-spatial information to provide insight into an industrial component’s structural and material composition: views with volume renderings of composite and individual 3D element maps visualize global material composition; transfer functions defined directly on the spectral data and overlaid pie-chart glyphs show elemental composition in 2D slice-views; a representative aggregated spectrum and spectra density histograms are introduced to provide a global overview in the spectral view. Spectral magic lenses, spectrum probing and elemental composition probing of points using a pie-chart view and a periodic table view aid the local material composition analysis. 
Two datasets are investigated to outline the usefulness of the presented techniques: a 3D virtually created phantom with a brass metal alloy and a real-world 2D water phantom with insertions of gold, barium, and gadolinium. Additionally a detailed user evaluation of the results is provided.", month = jun, journal = "Computer Graphics Forum", volume = "33", number = "3", note = "Article first published online: 12 JUL 2014", pages = "91--100", keywords = "multi-modal data, XRF, industrial computed tomography, linked views, spectral data", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/amirkhanov-2014-ama/", } @article{Rautek_Peter_2014_TUC, title = "Towards an Unbiased Comparison of CC, BCC, and FCC Lattices in Terms of Prealiasing", author = "Viktor Vad and Bal\'{a}zs Cs\'{e}bfalvi and Peter Rautek and Eduard Gr\"{o}ller", year = "2014", abstract = "In the literature on optimal regular volume sampling, the Body-Centered Cubic (BCC) lattice has been proven to be optimal for sampling spherically band-limited signals above the Nyquist limit. On the other hand, if the sampling frequency is below the Nyquist limit, the Face-Centered Cubic (FCC) lattice was demonstrated to be optimal in reducing the prealiasing effect. In this paper, we confirm that the FCC lattice is indeed optimal in this sense in a certain interval of the sampling frequency. By theoretically estimating the prealiasing error in a realistic range of the sampling frequency, we show that in other frequency intervals, the BCC lattice and even the traditional Cartesian Cubic (CC) lattice are expected to minimize the prealiasing. The BCC lattice is superior over the FCC lattice if the sampling frequency is not significantly below the Nyquist limit. Interestingly, if the original signal is drastically undersampled, the CC lattice is expected to provide the lowest prealiasing error. 
Additionally, we give a comprehensible clarification that the sampling efficiency of the FCC lattice is lower than that of the BCC lattice. Although this is a well-known fact, the exact percentage has been erroneously reported in the literature. Furthermore, for the sake of an unbiased comparison, we propose to rotate the Marschner-Lobb test signal such that an undue advantage is not given to either lattice.", month = jun, journal = "Computer Graphics Forum", volume = "33", number = "3", pages = "81--90", keywords = "Image representation—Volumetric, Picture/Image Generation—Display algorithms, Categories and Subject Descriptors", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Rautek_Peter_2014_TUC/", } @article{Viola_Ivan_CLD, title = "Continuous Levels-of-Detail and Visual Abstraction for Seamless Molecular Visualization", author = "Julius Parulek and Daniel J\"{o}nsson and Timo Ropinski and Stefan Bruckner and Anders Ynnerman and Ivan Viola", year = "2014", abstract = "Molecular visualization is often challenged with rendering of large molecular structures in real time. We introduce a novel approach that enables us to show even large protein complexes. Our method is based on the level-of-detail concept, where we exploit three different abstractions combined in one visualization. Firstly, molecular surface abstraction exploits three different surfaces, solvent-excluded surface (SES), Gaussian kernels and van der Waals spheres, combined as one surface by linear interpolation. Secondly, we introduce three shading abstraction levels and a method for creating seamless transitions between these representations. The SES representation with full shading and added contours stands in focus while on the other side a sphere representation of a cluster of atoms with constant shading and without contours provides the context. Thirdly, we propose a hierarchical abstraction based on a set of clusters formed on molecular atoms. 
All three abstraction models are driven by one importance function classifying the scene into the near-, mid- and far-field. Moreover, we introduce a methodology to render the entire molecule directly using the A-buffer technique, which further improves the performance. The rendering performance is evaluated on series of molecules of varying atom counts.", month = may, issn = "0167-7055", journal = "Computer Graphics Forum", number = "6", volume = "33", pages = "276--287", keywords = "clustering, implicit surfaces, level of detail algorithms, scientific visualization, Computer Applications", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Viola_Ivan_CLD/", } @inproceedings{Radwan-2014-CDR, title = "Efficient Collision Detection While Rendering Dynamic Point Clouds", author = "Mohamed Radwan and Stefan Ohrhallinger and Michael Wimmer", year = "2014", abstract = "A recent trend in interactive environments is the use of unstructured and temporally varying point clouds. This is driven by both affordable depth cameras and augmented reality simulations. One research question is how to perform collision detection on such point clouds. State-of-the-art methods for collision detection create a spatial hierarchy in order to capture dynamic point cloud surfaces, but they require O(NlogN) time for N points. We propose a novel screen-space representation for point clouds which exploits the property of the underlying surface being 2D. In order for dimensionality reduction, a 3D point cloud is converted into a series of thickened layered depth images. This data structure can be constructed in O(N) time and allows for fast surface queries due to its increased compactness and memory coherency. On top of that, parts of its construction come for free since they are already handled by the rendering pipeline. As an application we demonstrate online collision detection between dynamic point clouds. 
It shows superior accuracy when compared to other methods and robustness to sensor noise since uncertainty is hidden by the thickened boundary.", month = may, isbn = "978-1-4822-6003-8", publisher = "Canadian Information Processing Society", location = "Montreal, Quebec, Canada ", issn = "0713-5424", event = "Graphics Interface 2014", booktitle = "Proceedings of the 2014 Graphics Interface Conference", pages = "25--33", keywords = "bounding volumes, layered depth images, collision detection, point cloud, dynamic", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Radwan-2014-CDR/", } @inproceedings{charpenay-2014-sgn, title = "Sampling Gabor Noise in the Spatial Domain", author = "Victor Charpenay and Bernhard Steiner and Przemyslaw Musialski", year = "2014", abstract = "Gabor noise is a powerful technique for procedural texture generation. Contrary to other types of procedural noise, its sparse convolution aspect makes it easily controllable locally. In this paper, we demonstrate this property by explicitly introducing spatial variations. We do so by linking the sparse convolution process to the parametrization of the underlying surface. Using this approach, it is possible to provide control maps for the parameters in a natural and convenient way. 
In order to derive intuitive control of the resulting textures, we accomplish a small study of the influence of the parameters of the Gabor kernel with respect to the outcome and we introduce a solution where we bind values such as the frequency or the orientation of the Gabor kernel to a user-provided control map in order to produce novel visual effects.", month = may, isbn = "978-80-223-3601-7", publisher = "ACM Press", location = "Smolenice castle, Slovakia", editor = "Diego Gutierrez", booktitle = "Proceedings of the 30th Spring Conference on Computer Graphics - SCCG ", pages = "79--82", keywords = "texture synthesis, Gabor noise, procedural texture", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/charpenay-2014-sgn/", } @techreport{Purgathofer-2014-LSM, title = "A Live Services Marketplace: Telepresence through Remote Humans", author = "Werner Purgathofer and Hans J\"{u}rgen Pfisterer", year = "2014", abstract = "The basic idea is to use state-of-the-art telecommunication technology to implement the virtual presence of a person at a remote location. The concept is described with the use of modern mobile phones (smartphones), but several other devices with similar or extended capabilities could be used instead, including pads and notebooks, action cams and augmented reality glasses (e.g. Google eyeglasses). The person, who wants to be tele-present somewhere (let’s call this person the “pilot”), uses the smartphone to communicate with a person at the remote site (let’s call this person the “flyer”). The contact includes an acoustic connection, i.e. they can talk to each other, and a video connection, i.e. at least the pilot sees what the flyer is doing. The Live Services Marketplace describes a business how to connect pilots with flyers. Flyers can register their willingness to act as telepresence avatars for selected tasks from a list of allowable tasks, maintained to prevent illegal tasks. 
Pilots can book these flyers through a central administration system, which searches for appropriate flyers, anonymises the contact and organizes the financial arrangement. The audio and video connection is live only (just as a phone call). The paper describes the basic concept and many use cases for this business.", month = may, number = "TR-186-2-14-1", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", note = "human contact: technical-report@cg.tuwien.ac.at", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Purgathofer-2014-LSM/", } @article{birsak-2014-agtb, title = "Automatic Generation of Tourist Brochures", author = "Michael Birsak and Przemyslaw Musialski and Peter Wonka and Michael Wimmer", year = "2014", abstract = "We present a novel framework for the automatic generation of tourist brochures that include routing instructions and additional information presented in the form of so-called detail lenses. The first contribution of this paper is the automatic creation of layouts for the brochures. Our approach is based on the minimization of an energy function that combines multiple goals: positioning of the lenses as close as possible to the corresponding region shown in an overview map, keeping the number of lenses low, and an efficient numbering of the lenses. The second contribution is a route-aware simplification of the graph of streets used for traveling between the points of interest (POIs). This is done by reducing the graph consisting of all shortest paths through the minimization of an energy function. The output is a subset of street segments that enable traveling between all the POIs without considerable detours, while at the same time guaranteeing a clutter-free visualization. 
Video: http://www.youtube.com/watch?v=t3w7uxzSR-Y", month = apr, journal = "Computer Graphics Forum (Proceedings of EUROGRAPHICS 2014)", volume = "33", number = "2", issn = "1467-8659", pages = "449--458", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/birsak-2014-agtb/", } @bachelorsthesis{WEYDEMANN-2014-PIC, title = "Implementation of a PIC simulation using WebGL", author = "Leonard Weydemann", year = "2014", abstract = "This project’s aim is to find a WebGL based alternative to the Java implementation of Open- Pixi, a Java-based Particle-in-Cell (PIC) simulation software, and to add a third dimension. For this purpose, an existing JavaScript library, three.js, was chosen. A handful of approaches are explored and the resulting prototypes are then compared in terms of speed, as performance is a main concern. A shader-based implementation, the best performing of the prototypes, is then explained in more detail and recommendations for the future development of OpenPixi are given.", month = apr, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "WebGL, plasma simulation", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/WEYDEMANN-2014-PIC/", } @article{Guerrero-2014-GRF, title = "Edit Propagation using Geometric Relationship Functions", author = "Paul Guerrero and Stefan Jeschke and Michael Wimmer and Peter Wonka", year = "2014", abstract = "We propose a method for propagating edit operations in 2D vector graphics, based on geometric relationship functions. These functions quantify the geometric relationship of a point to a polygon, such as the distance to the boundary or the direction to the closest corner vertex. The level sets of the relationship functions describe points with the same relationship to a polygon. 
For a given query point we first determine a set of relationships to local features, construct all level sets for these relationships and accumulate them. The maxima of the resulting distribution are points with similar geometric relationships. We show extensions to handle mirror symmetries, and discuss the use of relationship functions as local coordinate systems. Our method can be applied for example to interactive floor-plan editing, and is especially useful for large layouts, where individual edits would be cumbersome. We demonstrate populating 2D layouts with tens to hundreds of objects by propagating relatively few edit operations.", month = mar, journal = "ACM Transactions on Graphics", volume = "33", number = "2", issn = "0730-0301", doi = "10.1145/2591010", pages = "15:1--15:15", keywords = "Shape Modeling, Floor Plans, Edit Propagation, Geometric Relationship Functions", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Guerrero-2014-GRF/", } @inproceedings{bernhard-2014-EFD, title = "The Effects of Fast Disparity Adjustments in Gaze-Controlled Stereoscopic Applications", author = "Matthias Bernhard and Camillo Dellmour and Michael Hecher and Efstathios Stavrakis and Michael Wimmer", year = "2014", abstract = "With the emergence of affordable 3D displays, stereoscopy is becoming a commodity. However, often users report discomfort even after brief exposures to stereo content. One of the main reasons is the conflict between vergence and accommodation that is caused by 3D displays. We investigate dynamic adjustment of stereo parameters in a scene using gaze data in order to reduce discomfort. In a user study, we measured stereo fusion times after abrupt manipulation of disparities using gaze data. We found that gaze-controlled manipulation of disparities can lower fusion times for large disparities. 
In addition we found that gaze-controlled disparity adjustment should be applied in a personalized manner and ideally performed only at the extremities or outside the comfort zone of subjects. These results provide important insight on the problems associated with fast disparity manipulation and are essential for developing appealing gaze-contingent and gaze-controlled applications.", month = mar, isbn = "978-1-4503-2751-0", publisher = "ACM", location = "Safety Harbor, FL, USA", editor = "Pernilla Qvarfordt and Dan Witzner Hansen", booktitle = "Proceedings of the Symposium on Eye Tracking Research and Applications (ETRA 2014)", pages = "111--118", keywords = "stereoscopic rendering, comfort models, fusion time, eye tracking", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/bernhard-2014-EFD/", } @inproceedings{weissenboeck-2014, title = "FiberScout: An Interactive Tool for Exploring and Analyzing Fiber Reinforced Polymers", author = "Johannes Weissenb\"{o}ck and Artem Amirkhanov and Weimin Li and Andreas Reh and Aleksandr Amirkhanov and Eduard Gr\"{o}ller and Johann Kastner and Christoph Heinzl", year = "2014", abstract = "Advanced composites such as fiber reinforced polymers are promising candidate materials for future components as they allow integrating the continuously rising demands of industry regarding costeffectiveness, function-orientation, integration and weight. The most important structures of fiber reinforced polymers are the individual fibers, as their characteristics (stiffness, strength, ductility, durability, etc.) to a large extent determine the properties of the final component. The main contribution of this paper is the introduction of a new system for interactive exploration and visual analysis of fiber properties in X-ray computed tomography data of fiber reinforced polymers. The presented tool uses parallel coordinates to define and configure initial fiber classes. 
Using a scatter plot matrix linked to the parallel coordinates the initial classification may be refined. This allows to analyze hidden relationships between individual fiber properties. 2D and 3D views depict the resulting fiber classifications. By using polar plots an intuitive rendering of the fiber orientation distribution is provided. In addition, two modules of higher abstraction are proposed: The Blob visualization creates a hull around fibers with similar characteristics. The fiber metadata visualization allows to calculate overlays for 2D and 3D views containing regional information of particular material characteristics. The proposed system has been evaluated by two groups of domain experts. Applying the presented concepts the user feedback shows that the domain experts are now able to efficiently perform tasks as classification of fibers, visualization of fiber lengths and orientations, and visualization of fiber regions. The insights gained can be forwarded to the design office as well as to material development and simulation, in order to speed up the development of novel composite components.", month = mar, isbn = "978-1-4799-2874-3 ", publisher = "IEEE Computer Society", location = "Yokohama", booktitle = "Proceedings of 2014 IEEE Pacific Visualization Symposium (PacificVis) (2014)", pages = "153--160", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/weissenboeck-2014/", } @WorkshopTalk{ilcik-2014-cgbpmi, title = "Challenges in grammar-based procedural modeling of interiors", author = "Martin Il\v{c}\'{i}k", year = "2014", abstract = "While the creation of convincing cityscapes is a well researched problem, there is a lack of robust and efficient techniques for modeling the interior of buildings. In my talk, I will introduce a recently started research project on Data-Driven Procedural Modeling of Interiors. 
In particular, I will focus on challenges for the subdivision of the interior space into rooms, placement of furniture and procedural generation of furniture pieces. Our preliminary results show first successful extensions and generalizations to commonly used grammar-based approaches like the CGA-Shape or G^2 grammars.", month = feb, event = "High Visual Computing (HiVisComp) 2014", location = "Pec pod Snezkou, Czech Republic", keywords = "object placement, formal grammars, procedural modeling, ideas, architecture", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/ilcik-2014-cgbpmi/", } @bachelorsthesis{KREUZER-2014-DPA, title = "Depixelizing Pixel Art on GPUs", author = "Felix Kreuzer", year = "2014", abstract = "Pixel art was frequently employed in games of the 90s and earlier. On today's large and high-resolution displays, pixel art looks blocky. Recently, an algorithm was introduced by Johannes Kopf and Dani Lischinski to create a smooth, resolution-independent vector representation from pixel art (see image). However, the algorithm is far too slow for interactive use for example in a game. The goal of this project was to implement the algorithm efficiently and on the GPU, so that it can be incorporated into current game emulators (ScummVM, dosemu, ...). ", month = feb, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/KREUZER-2014-DPA/", } @bachelorsthesis{filipovic-2014-dect, title = "Noise Reduction in Medical DECT Data", author = "Mirza Filipovic", year = "2014", abstract = "Dual energy computed tomography (DECT) recently gained popularity for medical diagnostic imaging. It has been demonstrated how DECT can improve density measurement and material differentiation, and practical applications for DECT imaging in medicine. 
Noise reduction is a standard operation in the process of image enhancement which is a necessary operation prior to image evaluation done by a radiologist. In this work, we describe two approaches for noise reduction using DECT data. First, we show in the work that the cross or joint bilateral filter can be effectively used on DECT images to reduce noise while preserving edges. Second, noise in two DECT images is anti-correlated and can be effectively removed by the KCNR algorithm. Even better results can be achieved by using algorithms that exploit additional characteristic information of DECT data, such as the spectral information. It was shown that the KCNR can increase its performance regarding quality when the spectral information is corrected before applying the KCNR. The AngioVis framework provides the ability to present and manipulate CT data. All discussed image enhancement algorithms are implemented in AngioVis as a plugin.", month = feb, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/filipovic-2014-dect/", } @inproceedings{Groeller_Eduard_2014_UCT, title = "Uncertainty in CT Metrology: Visualizations for Exploration and Analysis of Geometric Tolerances", author = "Artem Amirkhanov and Bernhard Fr\"{o}hler and Michael Reiter and Johann Kastner and Eduard Gr\"{o}ller and Christoph Heinzl", year = "2014", abstract = "Industrial 3D X-ray computed tomography (3DXCT) is increasingly applied as a technique for metrology applications. In contrast to conventional metrology tools such as coordinate measurement machines (CMMs), 3DXCT only estimates the exact position of the specimen’s surface and is subjected to a specific set of artifact types. These factors result in uncertainty that is present in the data. Previous work by Amirkhanov et 
al. [2] presented a tool prototype that is taking such uncertainty into account when measuring geometric tolerances such as straightness, circularity, or flatness. In this paper we extend the previous work with two more geometric tolerance types: cylindricity and angularity. We provide methods and tools for visualization, inspection, and analysis of these tolerances. For the cylindricity tolerance we employ neighboring profiles visualization, box-plot overview, and interactive 3D view. We evaluate applicability and usefulness of our methods on a new TP03 data set, and present results and new potential use cases.", month = feb, location = "Wels, Austria", isbn = "978-3-8440-2557-6", event = "iCT Conference 2014", booktitle = "Proceedings of 5th Conference on Industrial Computed Tomography (iCT Conference 2014)", journal = "Proceedings of iCT 2014", pages = "189--195", keywords = "metrology, level-of-details, uncertainty visualization, Industrial 3D computed tomography", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Groeller_Eduard_2014_UCT/", } @mastersthesis{Tragust-2014-master-thesis, title = "Integrating Annotations into a Point-based Rendering System", author = "Markus Tragust", year = "2014", abstract = "The preservation of archaeological sites is an important task in cultural heritage. Classical methods conserve archaeological objects in museums and provide restoration of archaeological sites threatened by decay. The improved digitalization provides the possibility to generate an accurate representation of archaeological sites by using laser scanners. The resulting point clouds can preserve the archaeological site and provide the possibility to view it in its digital form even if it no longer exists. Usually, the archaeological site comes with a lot of different material, which has been created over the years. 
This material provides information about the digitalized object, which helps to gain a deeper understanding about the presented archaeological site. This thesis presents an annotation system for a point-cloud renderer. The system allows adding annotations in the 3D space next to the part of the point cloud it belongs to. This helps to provide the additional information of the point cloud in the context it belongs to. Moreover, each annotation should present interesting information about specific annotated parts of the archaeological site to the viewer. Besides simple textual annotations, a variable amount of documents, such as images and PDFs, can be attached to each annotation to provide all kind of information. Several filtering techniques, including viewpoint-dependent priority filtering, are presented to control the visibility of the annotations. Moreover, a guidance system based on graphs is introduced to lead viewers to different points of interest, which are represented as annotations. To provide a clear connection between annotations and the annotated part of the point cloud, a point-selection method and a point-marking method are presented. To allow the connection of a large set of annotations to a single point cloud, these methods are developed in CUDA. This is done by extending existing methods, which create octrees in CUDA. 
The developed methods allow fast execution on the GPU while a CPU-based method is not able to handle such a large amount of point selections in real-time.", month = jan, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "user guidance, annotating objects, information system", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Tragust-2014-master-thesis/", } @bachelorsthesis{lkoell-2014, title = "Visualization in the Cloud", author = "Lukas K\"{o}ll", year = "2014", abstract = "As currently many new visualization techniques are developed, the need for rapid prototyping systems has arisen. Current visualization prototyping software provides extensive features, however it often lacks the possibility to easily start new visualization prototypes as well as the possibility to share and collaboratively work on those prototypes. Also, existing solutions involve a cumbersome and slow development process, because hardware-near solutions often require recompilation after every development step. The availability of hardware resources (e.g. GPU) is limited, hence a remote solution is required to take advantage of them, which also solves the problem of having to transfer large volume datasets. In this thesis, a system named VolumeShop Playground is introduced that compensates for the above stated disadvantages while still allowing for hardware-near development of realtime visualizations. VolumeShop Playground is based on the existing VolumeShop framework,enhances it by a scripting API and provides a web frontend for simple setup and collaboration. 
The fact that we use a modern scripting language instead of recompiling the source code every time will supply a tremendous increase in development speed.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/lkoell-2014/", } @bachelorsthesis{tweebo-2014, title = "Tweebo - Ein Geo-Basierter Twitter-Client", author = "August Kampfer and Matthias Sch\"{o}tta", year = "2014", abstract = "Twitter ist eine online Plattform, welche sich darauf spezialisiert hat Kurznachrichten, auch Tweets genannt, zu erstellen und in weiterer Folge online zu publizieren. Sobald ein Benutzer ein entsprechendes Twitter-Konto erstellt hat, hat er die M\"{o}glichkeit Kurznachrichten zu verfassen und per Favoritensystem andere Nachrichten-Streams zu verfolgen. Jeder Benutzer erh\"{a}lt seine eigene Twitter-Website, auf welcher seine Nachrichten einzusehen sind. Twitter wird auch gerne als Mikroblogging Plattform bezeichnet. Neben Facebook ist Twitter einer der gro{\ss}en Namen im Bereich der sozialen Netzwerke. Kurznachrichten, welche auf 140 Zeichen limitiert sind, bilden die Basis der Plattform. Diese enthalten neben der eigentlichen Nachricht, Metadaten, welche weitere Verarbeitungsebenen erm\"{o}glichen. Unter anderem stellt Twitter eine umfangreiche REST-API zur Verf\"{u}gung. Diese erm\"{o}glicht es Drittanbietern die Funktionalit\"{a}t von Twitter in andere Applikationen zu integrieren. Die Zurverf\"{u}gungstellung dieser Schnittstelle wirkt einer Kapselung der Twitter-Umgebung entgegen und motiviert neue Einsatzm\"{o}glichkeiten. 
Tweebo stellt eine dieser neuen Einsatzm\"{o}glichkeiten dar, fokusiert sich speziell auf den Standortaspekt eines Tweets und kann als geo-basierter Twitter Client f\"{u}r iOS, welcher eine wertvolle Kombination aus reinen Nachrichtenaspekten und Geolokalisierungen kombiniert, bezeichnet werden. Er wurde speziell f\"{u}r iOS entwickelt. Durch die au{\ss}ergew\"{o}hnliche Darstellung von Tweets auf einem dreidimensionalen Globus, wird es dem Benutzer erm\"{o}glicht, schnell Ver\"{a}nderungen im Weltgeschehen zu erkennen. Verschiedene Visualisierungsm\"{o}glichkeiten stellen erstmals eine geobasierte Darstellung von Tweets, auf einem mobilen Ger\"{a}t ansprechend zur Verf\"{u}gung. Tweebo versucht einen globalen Bezug hinsichtlich der Twitteraktivit\"{a}ten eines Benutzers zu visualisieren. Anstelle einer gewohnten Listenansicht erm\"{o}glicht Tweebo, durch Verarbeitung der zus\"{a}tzlichen Lokalisierungsdaten pro Kurznachricht, eine angepasste Ansicht auf einem dreidimensionalen Globus. Neben der eigentlichen Lokalisierung der Kurznachrichten auf dem Globus erm\"{o}glicht Tweebo auch die Visualisierung der Aktivit\"{a}tsintensit\"{a}t eines Nachrichtenstreams. Der Tweebo iOS Client unterst\"{u}tzt die prim\"{a}ren Twitter-Funktionlit\"{a}ten. 
Nat\"{u}rlich k\"{o}nnen Kurznachrichten erstellt und abgerufen werden, diese Funktionalit\"{a}ten werden aber mit einer zus\"{a}tzlichen Visualisierungsebene, dem dreidimensionalen Globus erweitert.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/tweebo-2014/", } @article{raidou_vis14, title = "Visual analytics for the exploration of multiparametric cancer imaging", author = "Renata Raidou and Marta Paes Moreira and Wouter van Elmpt and Marcel Breeuwer and Anna Vilanova i Bartroli", year = "2014", abstract = "Tumor tissue characterization can play an important role in thediagnosis and design of effective treatment strategies. In orderto gather and combine the necessary tissue information, multi-modal imaging is used to derive a number of parameters indica-tive of tissue properties. The exploration and analysis of relation-ships between parameters and, especially, of differences among dis-tinct intra-tumor regions is particularly interesting for clinical re-searchers to individualize tumor treatment. However, due to highdata dimensionality and complexity, the current clinical workflowis time demanding and does not provide the necessary intra-tumorinsight. We implemented a new application for the exploration ofthe relationships between parameters and heterogeneity within tu-mors. In our approach, we employ a well-known dimensionalityreduction technique [5] to map the high-dimensional space of tis-sue properties into a 2D information space that can be interactivelyexplored with integrated information visualization techniques. Weconducted several usage scenarios with real-patient data, of whichwe present a case of advanced cervical cancer. 
First indications show that our application introduces new features and functionalities that are not available within the current clinical approach.", journal = "Visual Analytics Science and Technology (VAST), 2014 IEEE Conference on Visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/raidou_vis14/", } @bachelorsthesis{silvana_2014, title = "Automated Lighting Design For Photorealistic Rendering", author = "Silvana Podaras", year = "2014", abstract = "We present a novel technique to minimize the number of light sources in a virtual 3D scene without introducing any perceptible changes to it. The theoretical part of the thesis gives an overview on previous research in the field of automated lighting design, followed by an introduction to the theory of rendering and genetic algorithms. The implementation is done as an extension called ``Light Source Cleaner'' to LuxRender, a physically based, open-source renderer. The algorithm adjusts the intensities of the light sources in a way that certain light sources can be canceled out, thus enabling to render a similar image with significantly fewer light sources, introducing a remarkable reduction to the execution time of scenes where many light sources are used.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "global illumination, photorealistic rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/silvana_2014/", } @article{raidou_vcbm14, title = "The iCoCooN: Integration of Cobweb Charts with Parallel Coordinates for Visual Analysis of DCE-MRI Modeling Variations", author = "Renata Raidou and Uulke A van der Heide and PJ van Houdt and Marcel Breeuwer and Anna Vilanova i Bartroli", year = "2014", abstract = "Efficacy of radiotherapy treatment depends on the specific characteristics of tumorous tissues. 
For the determi-nation of these characteristics, clinical practice uses Dynamic Contrast Enhanced (DCE) Magnetic ResonanceImaging (MRI). DCE-MRI data is acquired and modeled using pharmacokinetic modeling, to derive per voxela set of parameters, indicative of tissue properties. Different pharmacokinetic modeling approaches make differ-ent assumptions, resulting in parameters with different distributions. A priori, it is not known whether there aresignificant differences between modeling assumptions and which assumption is best to apply. Therefore, clinicalresearchers need to know at least how different choices in modeling affect the resulting pharmacokinetic parame-ters and also where parameter variations appear. In this paper, we introduce iCoCooN: a visualization applicationfor the exploration and analysis of model-induced variations in pharmacokinetic parameters. We designed a visualrepresentation, the Cocoon, by integrating perpendicularly Parallel Coordinate Plots (PCPs) with Cobweb Charts(CCs). PCPs display the variations in each parameter between modeling choices, while CCs present the relationsin a whole parameter set for each modeling choice. The Cocoon is equipped with interactive features to supportthe exploration of all data aspects in a single combined view. Additionally, interactive brushing allows to link theobservations from the Cocoon to the anatomy. We conducted evaluations with experts and also general users. Theclinical experts judged that the Cocoon in combination with its features facilitates the exploration of all significantinformation and, especially, enables them to find anatomical correspondences. 
The results of the evaluation withgeneral users indicate that the Cocoon produces more accurate results compared to independent multiples", journal = "Eurographics Workshop on Visual Computing for Biology and Medicine ", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/raidou_vcbm14/", } @studentproject{BEHAM-2014-RCR, title = "CloudyDay: Rendering of clouds, atmosphere and light shafts in HDR for testing Computer Vision Algorithms", author = "Michael Beham", year = "2014", abstract = "Rendering of clouds, atmosphere, and other natural phenomenon is an important topic in computer graphics. In this technical report, we present a novel solution, which uses different techniques to generate a realistic representation of the sky. We present a billboard-based approach to create clouds. We use half-angle slicing to generate volumetric shadows. The resulting shadow map is then used for casting shadows on the terrain, the clouds, and other objects. We also use and compare different atmosphere models and providing light shafts. Furthermore, CloudyDay provides HDR mapping, a bloom effect, colour grading as well as some natural phenomenon like rain. We develop CloudyDay to test an autonomous flying robot. We present several enhancements, which consider the specific requirements of this specific application area. All objects can be created by an artist. This is great workflow, if a specific test-case should be created. However, creating a lot of different variations of an object is a time-consuming task. A more reasonable way is to create the shapes with procedural modelling. This technique enables to create objects (in this paper clouds, atmosphere,...) and vary the representation by varying the parameters. 
", keywords = "real-time rendering, cloud rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/BEHAM-2014-RCR/", } @studentproject{sippl-2014-fss, title = "Framework for Shape Segmentation", author = "Sebastian Sippl", year = "2014", keywords = "geometry processing, shape segmentation, shape processing", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/sippl-2014-fss/", } @talk{Kehrer-2014-CSD, title = "Interactive Visual Analysis of Complex Scientific Data", author = "Johannes Kehrer", year = "2014", event = "TU M\"{u}nchen", location = "Munich, Germany", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Kehrer-2014-CSD/", } @talk{Groeller_2014_CV, title = "Comparative Visualization", author = "Eduard Gr\"{o}ller", year = "2014", abstract = "Visualization uses computer-supported, interactive, visual representations of (abstract) data to amplify cognition. In recent years data complexity and variability has increased considerably. This is due to new data sources as well as the availability of uncertainty, error and tolerance information. Instead of individual objects entire sets, collections, and ensembles are visually investigated. This raises the need for effective comparative visualization approaches. Visual data science and computational sciences provide vast amounts of digital variations of a phenomenon which can be explored through superposition, juxtaposition and explicit difference encoding. A few examples of comparative approaches coming from the various areas of visualization, i.e., scientific visualization, information visualization and visual analytics will be treated in more detail. Comparison and visualization techniques are helpful to carry out parameter studies for the special application area of non-destructive testing using 3D X-ray computed tomography (3DCT). We discuss multi-image views and an edge explorer for comparing and visualizing gray value slices and edges of several datasets simultaneously. 
Visual steering supports decision making in the presence of alternative scenarios. Multiple, related simulation runs are explored through branching operations. To account for uncertain knowledge about the input parameters, visual reasoning employs entire parameter distributions. This can lead to an uncertainty-aware exploration of (continuous) parameter spaces. VAICo, i.e., Visual Analysis for Image Comparison, depicts differences and similarities in large sets of images. It preserves contextual information, but also allows the user a detailed analysis of subtle variations. The approach identifies local changes and applies cluster analysis techniques to embed them in a hierarchy. The results of this comparison process are then presented in an interactive web application which enables users to rapidly explore the space of differences and drill-down on particular features. Given the amplified data variability, comparative visualization techniques are likely to gain in importance in the future. Research challenges, directions, and issues concerning this innovative area are sketched at the end of the talk.", event = "IEEE Pacific Visualization Symposium (PacificVis)2014", location = "Yokohama, Japan", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Groeller_2014_CV/", } @archiveproject{KAFKA-2014-19MRD, title = "19 Millarden Euro City", author = "Philipp Kafka", year = "2014", note = "unfinished student project", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/KAFKA-2014-19MRD/", } @talk{WIMMER-2014-DWNT, title = "Do we need the full reconstruction pipeline?", author = "Michael Wimmer", year = "2014", abstract = "The traditional cultural heritage documentation pipeline from acquisition using a range scanner to interactive display to the user is a tedious and labor-intensive process. In particular, reconstructing high-quality meshes from large point clouds can be time consuming. In this talk, I will present shortcuts to this pipeline. 
The first idea is not to reconstruct a mesh at all, but keep the original point cloud as long as possible. I will discuss the challenges in maintaining interactivity and high quality when dealing with the display and manipulation of huge point clouds. The second idea is to reconstruct extremely simple models for regular and man-made structures, using shape analysis and user guidance. These models can be shown in end-user installations and require very few resources for display. ", event = "EU-Korea Conference on Science and Technology", location = "Vienna, Austria", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/WIMMER-2014-DWNT/", } @studentproject{Zhu-2014-flowvis, title = "Flow Visualization with Stream Surfaces", author = "Shu Zhu", year = "2014", abstract = "Stream Surfaces for Flow Visualization, implemented in C++ and OpenGL, based on the Hurricane dataset", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Zhu-2014-flowvis/", } @studentproject{leimer-2014-fsco, title = "A Framework for Shape Co-Analysis", author = "Kurt Leimer", year = "2014", abstract = "This application serves as a framework for Co-Analysis, in which a set of meshes of a specific category (e.g. candelabra or chairs) is analysed in an attempt to group the parts constituting each mesh into semantic categories. This is achieved by first individually subdividing each mesh into smaller segments and then clustering the segments of all meshes based on their face-level features. In the example pictured above, the mesh parts are grouped into 4 distinct categories, each represented by a different color. Furthermore, the framework allows the selection of different algorithms for each step of the Co-Analysis pipeline and can also be easily extended by adding more algorithms. 
", keywords = "shape processing, shape segmentation, shape co-analysis", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/leimer-2014-fsco/", } @misc{lemuzic_2014_ipv, title = "Illustrative Visualization of Biochemical Processes Featuring Multiple Temporal Scales", author = "Mathieu Le Muzic and Julius Parulek and Manuela Waldner and Ivan Viola", year = "2014", abstract = "Scientific illustrators are commonly using structural description of molecular compounds when depicting complex biochemical processes. However, computational biology also provides procedural models describing the function of biological processes which are not currently used in the production pipeline. Instead, animators utilize scientific knowledge to manually animate and reproduce the functioning of cellular biology. We would like to explore the use of such models in order to generate explanatory illustrations that would show how molecular machinery works. Particle-based simulations provide the means for spatially representing the dynamics of biochemical processes. They compute the positions of each single particle and are supposed to mimic a realistic behaviour of the metabolites. Current mesoscale visualization also allows to directly show the results of such simulations by mapping the positions of particles in a virtual 3D environment. Nevertheless, some biochemical processes, like the DNA repair for instance, exhibit temporal multiscale aspects because they comprise diffusion rates which are much greater in comparison with reaction rates. As a result, it is challenging to produce a clear and coherent visualization out of this type of simulation. Indeed, when viewing the process at the pace which would let us see the reactions, it becomes impossible for the human eye to keep track of individual elements because of the very large diffusion displacements. 
On the other hand, if one would playback the simulation slow enough to be see a steady motion of individual elements, then only a very few number of reactions would occur in a reasonable amount of time. In this work we propose to solve the problem associated with multiple temporal scales by providing means for spatial. With this approach we aim at showing the two different temporal scale at the same time by using advanced trajectory smoothing mechanism. This would allow us to see individual elements while showing a world full of reactions, hence enabling us to communicate complex biological processes and molecular machineries in a comprehensive way. ", event = "Eurographics Workshop on Visual Computing for Biology", Conference date = "Poster presented at Eurographics Workshop on Visual Computing for Biology (2014-09-04--2014-09-05)", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/lemuzic_2014_ipv/", } @talk{Auzinger_2014_UJA, title = "GPGPU in Graphics and Visualization", author = "Thomas Auzinger", year = "2014", event = "Invited Talk at Universidad de Ja\'{e}n in Spain", location = "Universidad de Ja\'{e}n, Spain", keywords = "GPGPU, medical, visualization, antialiasing", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Auzinger_2014_UJA/", } @talk{Groeller_2014_CQV, title = "Comparative and Quantitative Visualization in Material Sciences", author = "Eduard Gr\"{o}ller", year = "2014", event = "Seminar 14231, Scientific Visualization, Dagstuhl", location = "Germany", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Groeller_2014_CQV/", } @talk{Groeller_2014_CaQ, title = "Comparative and Quantitative Visualization in Material Sciences", author = "Eduard Gr\"{o}ller", year = "2014", event = "Keynote talk at INFORMATIK 2014 Workshop Big Data Visual Computing - Quantitative Perspectives for Visual Computing", location = "Stuttgart, Germany", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Groeller_2014_CaQ/", } 
@talk{Groeller_2014_STC, title = "Selected Topics in Comparative Visualization", author = "Eduard Gr\"{o}ller", year = "2014", event = "Science Meeting of Visual Computing Center", location = "KAUST, Thuwal, Saudi Arabia", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Groeller_2014_STC/", } @talk{Groeller_2014_VND, title = "Visualization for Non-Destructive Testing", author = "Eduard Gr\"{o}ller", year = "2014", event = "NII Shonan Meeting Seminar 046 (Computer Visualization – Concepts and Challenges)", location = "Hayama Miura-gun, Kanagawa, Japan", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Groeller_2014_VND/", } @talk{Auzinger_2014_DCGI, title = "Analytic Rasterization", author = "Thomas Auzinger", year = "2014", event = "Invited Talk at Czech Technical University in Prague", location = "Czech Technical University in Prague, Department of Computer Graphics and Interaction, Prague", keywords = "antialiasing, analytic, prefiltering", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Auzinger_2014_DCGI/", } @phdthesis{scheiblauer-thesis, title = "Interactions with Gigantic Point Clouds", author = "Claus Scheiblauer", year = "2014", abstract = "During the last decade the increased use of laser range-scanners for sampling the environment has led to gigantic point cloud data sets. Due to the size of such data sets, tasks like viewing, editing, or presenting the data have become a challenge per se, as the point data is too large to fit completely into the main memory of a customary computer system. In order to accomplish these tasks and enable the interaction with gigantic point clouds on consumer grade computer systems, this thesis presents novel methods and data structures for efficiently dealing with point cloud data sets consisting of more than 109 point samples. 
To be able to access point samples fast that are stored on disk or in memory, they have to be spatially ordered, and for this a data structure is proposed which organizes the points samples in a level-of-detail hierarchy. Point samples stored in this hierarchy cannot only be rendered fast, but can also be edited, for example existing points can be deleted from the hierarchy or new points can be inserted. Furthermore, the data structure is memory efficient, as it only uses the point samples from the original data set. Therefore, the memory consumption of the point samples on disk, when stored in this data structure, is comparable to the original data set. A second data structure is proposed for selecting points. This data structure describes a volume inside which point samples are considered to be selected, and this has the advantage that the information about a selection does not have to be stored at the point samples. In addition to these two previously mentioned data structures, which represent novel contributions for point data visualization and manipulation, methods for supporting the presentation of point data sets are proposed. With these methods the user experience can be enhanced when navigating through the data. One possibility to do this is by using regional meshes that employ an out-of-core texturing method to show details in the mesoscopic scale on the surface of sampled objects, and which are displayed together with point clouds. Another possibility to increase the user experience is to use graphs in 3D space, which helps users to orient themselves inside point cloud models of large sites, where otherwise it would be difficult to find the places of interest. Furthermore, the quality of the displayed point cloud models can be increased by using a point size heuristics that can mimic a closed surface in areas that would otherwise appear undersampled, by utilizing the density of the rendered points in the different areas of the point cloud model. 
Finally, the use of point cloud models as a tool for archaeological work is proposed. Since it becomes increasingly common to document archaeologically interesting monuments with laser scanners, the number of application areas of the resulting point clouds is rising as well. These include, but are not limited to, new views of the monument that are impossible when studying the monument on-site, creating cuts and floor plans, or performing virtual anastylosis. All these previously mentioned methods and data structures are implemented in a single software application that has been developed during the course of this thesis and can be used to interactively explore gigantic point clouds.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "point-based rendering, out-of-core rendering, data structures, complexity analysis", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/scheiblauer-thesis/", } @studentproject{labschuetz-2014-IGI, title = "Image Based Global Illumination", author = "Matthias Labsch\"{u}tz", year = "2014", abstract = "The aim of this project was to find out how well image based global illumination can approximate ray-traced global illumination. 
The work identifies advantages and disadvantages of such techniques and gives some insight into how screen-space global illumination could be achieved in future works.", note = "1", keywords = "Global Illumination, Screen Space, Image Space, Evaluation", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/labschuetz-2014-IGI/", } @talk{Purgathofer-2014-ETH, title = "Aspects of Scientific Research in Cooperation with Companies", author = "Werner Purgathofer", year = "2014", event = "20 Year Anniversary of CGL at ETH", location = "Z\"{u}rich, Switzerland", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Purgathofer-2014-ETH/", } @talk{Purgathofer-2014-Rio, title = "Computer Graphics in Vienna and at the VRVis Research Center", author = "Werner Purgathofer", year = "2014", event = "Computer Graphics Lab at Universidade Federal do Rio de Janeiro", location = "Rio de Janeiro, Brazil", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Purgathofer-2014-Rio/", } @talk{Purgathofer_2014_AOR, title = "Application Oriented Research at the VRVis Center", author = "Werner Purgathofer", year = "2014", abstract = "The talk will describe how the VRVis Research Center was founded and how it functions today. Several application driven projects are outlined and some nice results are shown. He will also discuss the role of application oriented research institutions in the innovation pipeline, and the role and composition of the company partners will be explained. Finally, he will outline main requirements for the necessary qualifications of researchers at an application oriented research institution.", event = "The Visualization and Data Analysis group of the University of Vienna", location = "HS3, W\"{a}hringer Str. 
29, Informatik, Uni Wien", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Purgathofer_2014_AOR/", } @mastersthesis{fmistelbauer-2014-ad, title = "ActiveDICOM", author = "Florian Mistelbauer", year = "2014", abstract = "Digital Imaging and Communication in Medicine (DICOM) is a well-established image standard in medical imaging covering patient related information to network transfer protocols for medical workstations. As most DICOM images exhibit a rather static nature and visualization and computer graphics tend towards more interactive exploration of data, we try to enhance static images with interaction. For this purpose we will provide a mapping language that transfers an interaction to a specific visualization. All the necessary information will be encoded in the images and decoded by a provided viewer application for medical personnel.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/fmistelbauer-2014-ad/", } @talk{mistelbauer-2014-ekc, title = "Advanced Vessel Visualization", author = "Gabriel Mistelbauer", year = "2014", event = "EU-Korea Conference on Science and Technology", location = "Vienna, Austria", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/mistelbauer-2014-ekc/", } @article{Rautek_Peter_2014_VSA, title = "ViSlang: A System for Interpreted Domain-Specific Languages for Scientific Visualization", author = "Peter Rautek and Stefan Bruckner and Eduard Gr\"{o}ller and Markus Hadwiger", year = "2014", abstract = "Researchers from many domains use scientific visualization in their daily practice. Existing implementations of algorithms usually come with a graphical user interface (high-level interface), or as software library or source code (low-level interface). 
In this paper we present a system that integrates domain-specific languages (DSLs) and facilitates the creation of new DSLs. DSLs provide an effective interface for domain scientists avoiding the difficulties involved with low-level interfaces and at the same time offering more flexibility than high-level interfaces. We describe the design and implementation of ViSlang, an interpreted language specifically tailored for scientific visualization. A major contribution of our design is the extensibility of the ViSlang language. Novel DSLs that are tailored to the problems of the domain can be created and integrated into ViSlang. We show that our approach can be added to existing user interfaces to increase the flexibility for expert users on demand, but at the same time does not interfere with the user experience of novice users. To demonstrate the flexibility of our approach we present new DSLs for volume processing, querying and visualization. We report the implementation effort for new DSLs and compare our approach with Matlab and Python implementations in terms of run-time performance.", issn = "1077-2626", journal = "IEEE Transactions on Visualization and Computer Graphics", number = "12", volume = "20", pages = "2388--2396", keywords = "Volume visualization framework, Volume visualization, Domain-specific languages", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Rautek_Peter_2014_VSA/", } @talk{mindek-2014-vivi_cvut, title = "ViviSection: Skeleton-based Volume Editing", author = "Peter Mindek", year = "2014", event = "Scientific meeting of Department of Computer Graphics and Interaction", location = "Czech Technical University in Prague", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/mindek-2014-vivi_cvut/", } @xmascard{mindek-xmas-card-2014, title = "X-Mas Card 2014", author = "Peter Mindek", year = "2014", abstract = "This greeting card shows a Christmas preparation scene - the origin of the Christmas tree. 
Don't feel bad for the tree! In fact, it is happy to bring joy to children and get fancy makeup too. It is just a bit shocked at this moment. In the sky we see several constellations. From left, there are Encephalon, Maestros, Pelvis, and Infans. The interpretations of the constellations are created through medical-visualization technology. Even though these constellations have not yet been discovered, they might be one day. Diese Gru{\ss}karte zeigt eine Weihnachtsvorbereitungsszene - der Ursprung des Weihnachtsbaumes. Sie brauchen den Baum nicht zu bedauern! Tats\"{a}chlich ist er froh, Kinder gl\"{u}cklich zu machen und sch\"{o}n geschm\"{u}ckt zu werden. Er ist nur in diesem Moment ein wenig geschockt. Am Himmel sieht man mehrere Sternzeichen. Von links nach rechts sind das Encephalon, Maestros, Pelvis und Infans. Die Interpretationen wurden durch medizinische Visualisierungen erzeugt. Zwar wurden diese Sternbilder bisher noch nicht entdeckt, das k\"{o}nnte sich aber eines Tages \"{a}ndern. ", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/mindek-xmas-card-2014/", } @incollection{Groeller_Eduard_2014_THS, title = "The Haunted Swamps of Heuristics: Uncertainty in Problem Solving", author = "Artem Amirkhanov and Stefan Bruckner and Christoph Heinzl and Eduard Gr\"{o}ller", year = "2014", abstract = "In scientific visualization the key task of research is the provision of insight into a problem. Finding the solution to a problem may be seen as finding a path through some rugged terrain which contains mountains, chasms, swamps, and few flatlands. This path—an algorithm discovered by the researcher—helps users to easily move around this unknown area. If this way is a wide road paved with stones it will be used for a long time by many travelers. However, a narrow footpath leading through deep forests and deadly swamps will attract only a few adventure seekers. 
There are many different paths with different levels of comfort, length, and stability, which are uncertain during the research process. Finding a systematic way to deal with this uncertainty can greatly assist the search for a safe path which is in our case the development of a suitable visualization algorithm for a specific problem. In this work we will analyze the sources of uncertainty in heuristically solving visualization problems and will propose directions to handle these uncertainties.", booktitle = "Scientific Visualization", chapter = "Uncertainty, Multifield, Biomedical, and Scalable Visualization", editor = "Hansen, Charles D. and Chen, Min and Johnson, Christopher R. and Kaufman, Arie E. and Hagen, Hans", isbn = "978-1-4471-6496-8", note = "Chapter 5", publisher = "Springer London", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Groeller_Eduard_2014_THS/", } @talk{Purgathofer-2014-Pantalk, title = "Requirements on the Staff of an Application Oriented Research Organization", author = "Werner Purgathofer", year = "2014", event = "Current Issues of Science and Research in the Global World 2014", location = "Vienna, Austria", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Purgathofer-2014-Pantalk/", } @talk{Purgathofer-2014-EurasiaGr, title = "Accurate Fast Simulation of Light", author = "Werner Purgathofer", year = "2014", event = "Eurasia Graphics", location = "Ankara, Turkey", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Purgathofer-2014-EurasiaGr/", } @talk{mindek_peter-2014-cs_kaust, title = "Managing Spatial Selections with Contextual Snapshots", author = "Peter Mindek", year = "2014", abstract = "Spatial selections are a ubiquitous concept in visualization. By localizing particular features, they can be analyzed and compared in different views. However, the semantics of such selections often depend on specific parameter settings and it can be difficult to reconstruct them without additional information. 
In this paper, we present the concept of contextual snapshots as an effective means for managing spatial selections in visualized data. The selections are automatically associated with the context in which they have been created. Contextual snapshots can also be used as the basis for interactive integrated and linked views, which enable in-place investigation and comparison of multiple visual representations of data. Our approach is implemented as a flexible toolkit with well-defined interfaces for integration into existing systems. We demonstrate the power and generality of our techniques by applying them to several distinct scenarios such as the visualization of simulation data, the analysis of historical documents, and the display of anatomical data.", event = "Scientific meeting of Visual Computing Center", location = "King Abdullah University of Science and Technology, Saudi Arabia", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/mindek_peter-2014-cs_kaust/", } @bachelorsthesis{boesch_2014_browserHistoryVis, title = "The Chaser: Chrome Extension for History Visualization", author = "Chris B\"{o}sch", year = "2014", abstract = "Revisitation of previously requested URLs happens frequently and the most common list-view-based visualization of the user’s browsing history provided by nearly every internet browser cannot give a compact general view. For this reason we designed and implemented an extension for Chrome called The Chaser, by which an alternative visualization of the content is possible. The currently available add-ons / extensions have other aims to illustrate the history. Some of them are about to show the differences in quantity of called pages. Others give an overview of page paths and the user’s tracks from site to site. Our extension concentrates on helping finding a visited page and giving users a better overview of their called URLs. The user should get the ability to control the time-line with mouse gestures and/or keyboard input. 
After discarding a 3D prototype we came to the conclusion of designing a simple, self-explanatory time-based illustration with two dimensions. The x-axis represents the time with different levels of detail and the y-axis the visited hosts. After performing an evaluation with six probands where The Chaser’s visualization and its efficiency was compared to the standard list-view, all of them would prefer our extension against the standard history view.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/boesch_2014_browserHistoryVis/", }