@talk{Groeller_2016_I6, title = "Visual Computing and Analysis of Complex Systems", author = "Eduard Gr\"{o}ller", year = "2016", month = dec, event = "Invited Talk, State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China", location = "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Groeller_2016_I6/", } @talk{purgathofer_2016I1, title = "Visual Computing — a best practice from Vienna", author = "Werner Purgathofer", year = "2016", month = dec, event = "National Research University – Higher School of Economics (HSE)", location = "Moskau", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/purgathofer_2016I1/", } @talk{Groeller_2016_I5, title = "Visual Computing and Analysis of Complex Systems", author = "Eduard Gr\"{o}ller", year = "2016", month = dec, event = "Invited talk at SIGGRAPH Asia 2016 Symposium on Visualization, Macao, China", location = "SIGGRAPH Asia 2016 Symposium on Visualization, Macao, China", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Groeller_2016_I5/", } @talk{Groeller_2016_I8, title = "Visual Computing for the Analysis of Complex Systems", author = "Eduard Gr\"{o}ller", year = "2016", month = dec, event = "Invited Talk at the Department of Computer Science and Engineering (CSE) at the Hong Kong University of Science and Technology (HKUST)", location = "Department of Computer Science and Engineering (CSE) at the Hong Kong University of Science and Technology (HKUST), Hong Kong", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Groeller_2016_I8/", } @article{forsythe-2016-ccm, title = "Resolution-independent superpixels based on convex constrained meshes without small angles", author = "Jeremy Forsythe and Vitaliy Kurlin and Andrew Fitzgibbon", year = "2016", abstract = "The over-segmentation problem for images is studied in the new resolution-independent formulation when a large image is approximated by a small number of convex polygons with straight edges at subpixel precision. These polygonal superpixels are obtained by refining and extending subpixel edge segments to a full mesh of convex polygons without small angles and with approximation guarantees. Another novelty is the objective error difference between an original pixel-based image and the reconstructed image with a best constant color over each superpixel, which does not need human segmentations. The experiments on images from the Berkeley Segmentation Database show that new meshes are smaller and provide better approximations than the state-of-the-art.", month = dec, journal = "Lecture Notes in Computer Science (LNCS)", volume = "10072", issn = "0302-9743", pages = "223--233", keywords = "superpixels, polygonal mesh, Delaunay triangulation, constrained triangulation, edge detection", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/forsythe-2016-ccm/", } @xmascard{mindek-xmas-card-2016, title = "X-Mas Card 2016", author = "Ludovic Autin and Peter Mindek", year = "2016", abstract = "As far as we can tell, the Universe is made of atoms. Or pixels. In any case, this Christmas card celebrates both. This year's Christmas tree is decorated with a chain of DNA molecules modeled as a spline populated by nucleotides. Several macromolecules of Fibrinogen, Hemoglobin, and Low-Density Lipoprotein are used as decorations as well. All the proteins, as well as the DNA, are modeled down to atomic resolution. 
The scene is rendered in real-time using cellVIEW, a molecular visualization framework developed at TU Wien and Scripps Research Institute. *** Soweit wir wissen besteht das Universum aus Atomen. Oder Pixel. Wie auch immer, diese Weihnachtskarte feiert beides. Der Weihnachtsbaum ist mit einer DNA-Molek\"{u}lkette geschm\"{u}ckt, modelliert als ein Spline der mit Nukleotiden besetzt ist. Auch mehrere Fibrinogen-, H\"{a}moglobin- und Lipoprotein-Makromolek\"{u}le wurden als Dekorationen verwendet. Alle Proteine, als auch die DNA, sind bis auf Atomaufl\"{o}sung modelliert. Die Szene wurde mit cellVIEW in Echtzeit gerendert. cellVIEW ist eine Visualisierungssoftware f\"{u}r Molek\"{u}le, die an der TU Wien und dem Scripps Research Institute entwickelt wurde.", month = dec, keywords = "Molecular Visualization, DNA, Proteins", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/mindek-xmas-card-2016/", } @inproceedings{ZOTTI-2016-VAA, title = "Virtual Archaeoastronomy: Stellarium for Research and Outreach", author = "Georg Zotti and Florian Schaukowitsch and Michael Wimmer and Wolfgang Neubauer", year = "2019", abstract = "In the last few years, the open-source desktop planetarium program Stellarium has become ever more popular for research and dissemination of results in Cultural Astronomy. In this time we have added significant capabilities for applications in cultural astronomy to the program. The latest addition allows its use in a multi-screen installation running both completely automated and manually controlled setups. During this development period, the accuracy of the astronomical simulation has also been greatly improved.", month = mar, isbn = "978-3-319-97006-6", publisher = "Springer", location = "Milano, Italy", event = "SIA 2016 (16th Conference of the Italian Society for Archaeoastronomy)", booktitle = "Archaeoastronomy in the Roman World (Proceedings 16th Conference of the Italian Society for Archaeoastronomy)", pages = "187--205", keywords = "stellarium", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/ZOTTI-2016-VAA/", } @article{Groeller_2016_P7, title = "Depth functions as a quality measure and for steering multidimensional projections", author = "Douglas Cedrim and Viktor Vad and Afonso Paiva and Eduard Gr\"{o}ller and Luis Gustavo Nonato and Antonio Castelo", year = "2016", abstract = "The analysis of multidimensional data has been a topic of continuous research for many years. This type of data can be found in several different areas of science. A common task while analyzing such data is to investigate patterns by interacting with spatializations of the data in a visual domain. Understanding the relation between the underlying dataset characteristics and the technique used to provide its visual representation is of fundamental importance since it can provide a better intuition on what to expect from the spatialization. In this paper, we propose the usage of concepts from non-parametric statistics, namely depth functions, as a quality measure for spatializations. We evaluate the action of multi-dimensional projection techniques on such estimates. We apply both qualitative and quantitative analyses on four different multidimensional techniques selected according to the properties they aim to preserve. 
We evaluate them with datasets of different characteristics: synthetic, real-world, high-dimensional, and contaminated with outliers. As a straightforward application, we propose to use depth information to guide multidimensional projection techniques which rely on interaction through control point selection and positioning. Even for techniques which do not intend to preserve any centrality measure, interesting results can be achieved by separating regions possibly contaminated with outliers.", month = nov, journal = "Computers & Graphics (Special Section on SIBGRAPI 2016)", volume = "60", doi = "10.1016/j.cag.2016.08.008", pages = "93--106", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Groeller_2016_P7/", } @article{Kozlikova-Visualization-2016b, title = "Visualization of Biomolecular Structures: State of the Art Revisited", author = "Barbora Kozlikova and Michael Krone and Martin Falk and Norbert Lindow and Daniel Baum and Ivan Viola and Marc Baaden and Julius Parulek and Hans-Christian Hege", year = "2016", month = nov, doi = "10.1111/cgf.13072", journal = "Computer Graphics Forum", number = "XX", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Kozlikova-Visualization-2016b/", } @phdthesis{LeMuzic_2016_PhD, title = "From Atoms to Cells: Interactive and Illustrative Visualization of Digitally Reproduced Lifeforms", author = "Mathieu Le Muzic", year = "2016", abstract = "Macromolecules, such as proteins, are the building blocks of the machinery of life, and therefore are essential to the comprehension of physiological processes. In physiology, illustrations and animations are often utilized as a means of communication because they can easily be understood with little background knowledge. However, their realization requires numerous months of manual work, which is both expensive and time-consuming. Computational biology experts produce large amounts of data every day that are publicly available and that contain valuable information about the structure and also the function of these macromolecules. Instead of relying on manual work to generate illustrative visualizations of cell biology, we envision a solution that would utilize all the data already available in order to streamline the creation process. This thesis presents several contributions that aim at enabling our vision. First, a novel GPU-based rendering pipeline that allows interactive visualization of realistic molecular datasets comprising up to hundreds of millions of macromolecules. The rendering pipeline is embedded into a popular game engine, and well-known computer graphics optimizations, such as level-of-detail, instancing, and occlusion queries, were adapted to support this type of data. Secondly, a new method for authoring cutaway views and improving spatial exploration of crowded molecular landscapes. The system relies on the use of clipping objects that are manually placed in the scene and on visibility equalizers that allow fine-tuning of the visibility of each species present in the scene. Agent-based modeling produces trajectory data that can also be combined with structural information in order to animate these landscapes. The snapshots of the trajectories are often played in fast-forward to shorten the length of the visualized sequences, which also renders potentially interesting events occurring at a higher temporal resolution invisible. 
The third contribution is a solution to visualize time-lapses of agent-based simulations that also reveals hidden information that is only observable at higher temporal resolutions. And finally, a new type of particle system that utilizes quantitative models as input and generates missing spatial information to enable the visualization of molecular trajectories and interactions. The particle system produces a visual output similar to that of traditional agent-based modeling tools at a much lower computational footprint and allows interactive changing of the simulation parameters, which was not achievable with previous methods.", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/LeMuzic_2016_PhD/", } @talk{Groeller_2016_I4, title = "Visual Data Exploration", author = "Eduard Gr\"{o}ller", year = "2016", month = oct, event = "Keynote talk at the 21st International Symposium on Vision, Modeling and Visualization (VMV 2016), Bayreuth, Germany", location = "Keynote talk at the 21st International Symposium on Vision, Modeling and Visualization (VMV 2016), Bayreuth, Germany", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Groeller_2016_I4/", } @inproceedings{Groeller_2016_P6, title = "PorosityAnalyzer: Visual Analysis and Evaluation of Segmentation Pipelines to Determine the Porosity in Fiber-Reinforced Polymers", author = "Johannes Weissenb\"{o}ck and Artem Amirkhanov and Eduard Gr\"{o}ller and Johannes Kastner and Christoph Heinzl", year = "2016", abstract = "In this paper we present PorosityAnalyzer, a novel tool for detailed evaluation and visual analysis of pore segmentation pipelines to determine the porosity in fiber-reinforced polymers (FRPs). The presented tool consists of two modules: the computation module and the analysis module. The computation module enables a convenient setup and execution of distributed off-line computations on industrial 3D X-ray computed tomography datasets. It allows the user to assemble individual segmentation pipelines in the form of single pipeline steps, and to specify the parameter ranges as well as the sampling of the parameter space of each pipeline segment. The result of a single segmentation run consists of the input parameters, the calculated 3D binary segmentation mask, the resulting porosity value, and other derived results (e.g., segmentation pipeline runtime). The analysis module presents the data at different levels of detail by drill-down filtering in order to determine accurate and robust segmentation pipelines. Overview visualizations allow the user to initially compare and evaluate the segmentation pipelines. With a scatter plot matrix (SPLOM), the segmentation pipelines are examined in more detail based on their input and output parameters. Individual segmentation-pipeline runs are selected in the SPLOM and visually examined and compared in 2D slice views and 3D renderings by using aggregated segmentation masks and statistical contour renderings. PorosityAnalyzer has been thoroughly evaluated with the help of twelve domain experts. 
Two case studies demonstrate the applicability of our proposed concepts and visualization techniques, and show that our tool helps domain experts to gain new insights and improve their workflow efficiency.", month = oct, publisher = "IEEE Computer Society", booktitle = "IEEE Conference on Visual Analytics Science and Technology, 2016 (VAST 2016)", pages = "101--110", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Groeller_2016_P6/", } @article{steiner_2016_isad, title = "Integrated Structural-Architectural Design for Interactive Planning", author = "Bernhard Steiner and Elham Mousavian and Fatemeh Mehdizadeh Saradj and Michael Wimmer and Przemyslaw Musialski", year = "2017", abstract = "Traditionally, building floorplans are designed by architects with their usability, functionality, and architectural aesthetics in mind; however, the structural properties of the distribution of load-bearing walls and columns are usually not taken into account at this stage. In this paper we propose a novel approach for the design of architectural floorplans by integrating structural layout analysis directly into the planning process. In order to achieve this, we introduce a planning tool which interactively enforces checks for structural stability of the current design, and which on demand proposes how to stabilize it if necessary. Technically, our solution contains an interactive architectural modeling framework as well as a constrained optimization module, where both are based on respective architectural rules. Using our tool, an architect can predict already at a very early planning stage which designs are structurally sound, such that later changes due to stability reasons can be prevented. We compare manually computed solutions with optimal results of our proposed automated design process in order to show how much our proposed system can help architects to improve the process of laying out structural models optimally.", month = dec, doi = "10.1111/cgf.12996", issn = "1467-8659", journal = "Computer Graphics Forum", number = "8", volume = "36", pages = "80--94", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/steiner_2016_isad/", } @inproceedings{WIMMER-2016-HARVEST4D, title = "Harvesting Dynamic 3D Worlds from Commodity Sensor Clouds", author = "Tamy Boubekeur and Paolo Cignoni and Elmar Eisemann and Michael Goesele and Reinhard Klein and Stefan Roth and Michael Weinmann and Michael Wimmer", year = "2016", abstract = "The EU FP7 FET-Open project ``Harvest4D: Harvesting Dynamic 3D Worlds from Commodity Sensor Clouds'' deals with the acquisition, processing, and display of dynamic 3D data. Technological progress is offering us a widespread availability of sensing devices that deliver different data streams, which can be easily deployed in the real world and produce streams of sampled data with increased density and easier iteration of the sampling process. These data need to be processed and displayed in a new way. The Harvest4D project proposes a radical change in acquisition and processing technology: instead of a goal-driven acquisition that determines the devices and sensors, its methods let the sensors and resulting available data determine the acquisition process. A variety of challenging problems need to be solved: huge data amounts, different modalities, varying scales, and dynamic, noisy and colorful data. This short contribution presents a selection of the many scientific results produced by Harvest4D. 
We will focus on those results that could have a major impact on the Cultural Heritage domain, namely facilitating the acquisition of the sampled data or providing advanced visual analysis capabilities.", month = oct, isbn = "978-3-03868-011-6", publisher = "Eurographics Association", location = "Genova, Italy", event = "GCH 2016", editor = "Chiara Eva Catalano and Livio De Luca", doi = "10.2312/gch.20161378", booktitle = "Proceedings of the 14th Eurographics Workshop on Graphics and Cultural Heritage", pages = "19--22", keywords = "acquisition, 3d scanning, reconstruction", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/WIMMER-2016-HARVEST4D/", } @bachelorsthesis{Przemyslaw_Gora_2016_UVU, title = "Unreal vs Unity: Ein Vergleich zwischen zwei modernen Spiele-Engines", author = "Przemyslaw Gora and Lukas Leibetseder", year = "2016", abstract = "This bachelor’s thesis focuses on the comparison of two game engines, the Unreal Engine 4 and the Unity 5 Engine. We will take a closer look at the different aspects that we find important, and describe and compare them. Starting with the content pipeline, which includes the usage of externally created content, we will focus on three big categories: Audio, Images and 3D-Assets. During this process it will be shown that Unity 5 supports many more import formats than the Unreal Engine 4. This is especially noticeable with Audio and 3D-Assets. For the latter there is a feature in Unity 5 that allows you to directly import formats of various modelling tools like Maya, although it is fair to mention that in a few cases one will be reverting to the standard way of importing FBX files. While Unreal Engine 4 doesn’t have equally broad support for external formats, it offers more options to use the assets within the engine. In the following chapter we will take a look at the features each engine has to offer. Both Unreal and Unity have a big arsenal of tools to simplify various aspects of the development process. Yet again, the Unreal Engine offers a greater set of options. Afterwards we will create a small, simple project in Unreal Engine 4 and Unity 5 to demonstrate the usability and tools both engines have to offer. As we will see, the level design and placing of some objects in the editor is very similar. The interesting part starts with the creation of a controllable player character. This behaviour is realized differently in the two engines. In Unity 5 one uses C# scripts, whereas Unreal Engine 4 offers visual scripting. We will compare those two systems and point out their pros and cons. Later on, we will take a look at the list of effects from the lecture UE Computergraphik (186.831) and check whether they are available in either of the two engines. In the last chapter, we’ll take a look at the legal aspects and limitations when using Unreal and Unity. 
It’s interesting to see how far it is possible to use those engines in university lectures.", month = oct, note = "1", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Unreal, Unity 3D, Game Engine", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Przemyslaw_Gora_2016_UVU/", } @inproceedings{Reichinger-2016-spaghetti, title = "Spaghetti, Sink and Sarcophagus: Design Explorations of Tactile Artworks for Visually Impaired People", author = "Andreas Reichinger and Werner Purgathofer", year = "2016", month = oct, event = "9th Nordic Conference on CHI 2016", booktitle = "Proceedings of the 9th Nordic Conference on CHI 2016", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Reichinger-2016-spaghetti/", } @studentproject{erler_philipp-2016-femfluid-prakt, title = "Finite Element Fluids in Matlab", author = "Philipp Erler", year = "2016", abstract = "Implementation of a 2D finite elements free-surface liquid simulation with solid obstables in Matlab.", month = oct, keywords = "Finite Element Method, FEM, Fluid Simulation", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/erler_philipp-2016-femfluid-prakt/", } @mastersthesis{SCHUETZ-2016-POT, title = "Potree: Rendering Large Point Clouds in Web Browsers", author = "Markus Sch\"{u}tz", year = "2016", abstract = "This thesis introduces Potree, a web-based renderer for large point clouds. It allows users to view data sets with billions of points, from sources such as LIDAR or photogrammetry, in real time in standard web browsers. One of the main advantages of point cloud visualization in web browser is that it allows users to share their data sets with clients or the public without the need to install third-party applications and transfer huge amounts of data in advance. The focus on large point clouds, and a variety of measuring tools, also allows users to use Potree to look at, analyze and validate raw point cloud data, without the need for a time-intensive and potentially costly meshing step. The streaming and rendering of billions of points in web browsers, without the need to load large amounts of data in advance, is achieved with a hierarchical structure that stores subsamples of the original data at different resolutions. A low resolution is stored in the root node and with each level, the resolution gradually increases. The structure allows Potree to cull regions of the point cloud that are outside the view frustum, and to render distant regions at a lower level of detail. The result is an open source point cloud viewer, which was able to render point cloud data sets of up to 597 billion points, roughly 1.6 terabytes after compression, in real time in a web browser.", month = sep, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "point cloud rendering, WebGL, LIDAR", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/SCHUETZ-2016-POT/", } @article{Groeller_2016_P4, title = "Visual Analytics for the Exploration and Assessment of Segmentation Errors", author = "Renata Raidou and Freek Marcelis and Marcel Breeuwer and Eduard Gr\"{o}ller and Anna Vilanova i Bartroli and Huub van de Wetering", year = "2016", abstract = "Several diagnostic and treatment procedures require the segmentation of anatomical structures from medical images. 
However, the automatic model-based methods that are often employed may produce inaccurate segmentations. These, if used as input for diagnosis or treatment, can have detrimental effects on the patients. Currently, an analysis to predict which anatomic regions are more prone to inaccuracies, and to determine how to improve segmentation algorithms, cannot be performed. We propose a visual tool to enable experts, working on model-based segmentation algorithms, to explore and analyze the outcomes and errors of their methods. Our approach supports the exploration of errors in a cohort of pelvic organ segmentations, where the performance of an algorithm can be assessed. Also, it enables the detailed exploration and assessment of segmentation errors in individual subjects. To the best of our knowledge, there is no other tool with comparable functionality. A usage scenario is employed to explore and illustrate the capabilities of our visual tool. To further assess the value of the proposed tool, we performed an evaluation with five segmentation experts. The evaluation participants confirmed the potential of the tool in providing new insight into their data and employed algorithms. They also gave feedback for future improvements.", month = sep, journal = "Eurographics Workshop on Visual Computing for Biology and Medicine", pages = "193--202", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Groeller_2016_P4/", } @mastersthesis{Pfahler-2016-MT, title = "Visualisierung hochdimensionaler Daten mit hierarchischer Gruppierung von Teilmengen", author = "David Pfahler", year = "2019", abstract = "The number of installed sensors to acquire data, for example electricity meters in smart grids, is increasing rapidly. The huge amount of collected data needs to be analyzed and monitored by transmission-system operators. This task is supported by visual analytics techniques, but traditional multi-dimensional data visualization techniques do not scale very well for high-dimensional data. The main contribution of this thesis is a framework to efficiently examine and compare such high-dimensional data. The key idea is to divide the data by the semantics of the underlying dimensions into groups. Domain experts are familiar with the meta-information of the data and are able to structure these groups into a hierarchy. Various statistical properties are calculated from the subdivided data. These are then visualized by the proposed system using appropriate means. The hierarchy and the visualizations of the calculated statistical values are displayed in a tabular layout. The rows contain the subdivided data and the columns visualize their statistics. Flexible interaction possibilities with the visual representation help the experts to fulfill their analysis tasks. The tasks include searching for structures, sorting by statistical properties, identifying correlations of the subdivided data, and interactively subdividing or combining the data. 
A usage scenario evaluates the design of the framework with a data set of the target domain in the energy sector.", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/Pfahler-2016-MT/", } @bachelorsthesis{prost-2016-molecule, title = "Molecule-Rendering in Unity3D", author = "Lukas Prost", year = "2016", abstract = "Due to their omnipresence and ease of use, smart phones are getting more and more utilized as educational instruments for different subjects, for example, visualizing molecules in a chemistry class. In domain-specific mobile visualization applications, the choice of the ideal visualization technique of molecules can vary based on the background and age of the target group, and mostly depends on the choice of a graphical designer. Designers, however, rarely have sufficient programming skills and require an engineer even for the slightest adjustment in the required visual appearance. In this thesis we present a configuration system for rendering effects implemented in Unity3D, that allows to define the visual appearance of a molecule in a JSON file without the need of programming knowledge. We discuss the technical realization of different rendering effects on a mobile platform, and demonstrate our system and its versatility on a commercial chemistry visualization app, creating different visual styles for molecule renderings that are appealing to students as well as scientists and advertisement. ", month = sep, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "molecule visualization, Unity, rendering effects, mobile devices", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/prost-2016-molecule/", } @article{Mistelbauer_Gabriel_2016, title = "Aortic Dissection Maps: Comprehensive Visualization of Aortic Dissections for Risk Assessment", author = "Gabriel Mistelbauer and Johanna Schmidt and A.M. Sailer and Kathrin B\"{a}umler and Shannon Walters and Dominik Fleischmann", year = "2016", abstract = "Aortic dissection is a life threatening condition of the aorta, characterized by separation of its wall layers into a true and false lumen. A subset of patients require immediate surgical or endovascular repair. All survivors of the acute phase need long-term surveillance with imaging to monitor chronic degeneration and dilatation of the false lumen and prevent late adverse events such as rupture, or malperfusion. We introduce four novel plots displaying features of aortic dissections known or presumed to be associated with risk of future adverse events: Aortic diameter, the blood supply (outflow) to the aortic branches from the true and false lumen, the previous treatment, and an estimate of adverse event-free probabilities in one, two and 5 years. Aortic dissection maps, the composite visualization of these plots, provide a baseline for visual comparison of the complex features and associated risk of aortic dissection. 
These maps may lead to more individualized monitoring and improved, patient-centric treatment planning in the future.", month = sep, journal = "Eurographics Workshop on Visual Computing for Biology and Medicine (2016)", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Mistelbauer_Gabriel_2016/", } @inproceedings{sorger-2016-fowardabstraction, title = "Illustrative Transitions in Molecular Visualization via Forward and Inverse Abstraction Transform", author = "Johannes Sorger and Peter Mindek and Tobias Klein and Graham Johnson and Ivan Viola", year = "2016", abstract = "A challenging problem in biology is the incompleteness of acquired information when visualizing biological phenomena. Structural biology generates detailed models of viruses or bacteria at different development stages, while the processes that relate one stage to another are often not clear. Similarly, the entire life cycle of a biological entity might be available as a quantitative model, while only one structural model is available. If the relation between two models is specified at a lower level of detail than the actual models themselves, the two models cannot be interpolated correctly. We propose a method that deals with the visualization of incomplete data information in the developmental or evolutionary states of biological mesoscale models, such as viruses or microorganisms. The central tool in our approach is visual abstraction. Instead of directly interpolating between two models that show different states of an organism, we gradually forward transform the models into a level of visual abstraction that matches the level of detail of the modeled relation between them. At this level, the models can be interpolated without conveying false information. After the interpolation to the new state, we apply the inverse transformation to the model's original level of abstraction. To show the flexibility of our approach, we demonstrate our method on the basis of molecular data, in particular data of the HIV virion and the mycoplasma bacterium.", month = sep, organization = "Eurographics", location = "Bergen", editor = "S. Bruckner, B. Preim, and A. Vilanova", booktitle = "Eurographics Workshop on Visual Computing for Biology and Medicine (VCBM)", pages = "21--30", keywords = "I.3.3 [Computer Graphics]: Picture/Image Generation-Display algorithms", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/sorger-2016-fowardabstraction/", } @bachelorsthesis{Tucek_Tom-2016-aai, title = "Agent-based architecture for artistic real-time installation", author = "Tom Tucek", year = "2016", abstract = "The aim of this thesis is to transfer artistically predetermined scenarios and behaviours for several digital figures acting in the context of an art installation into an agent-based system and to develop the corresponding agent behaviours. For this purpose the agent-oriented programming language called AgentSpeak is used.", month = aug, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Tucek_Tom-2016-aai/", } @mastersthesis{Spechtenhauser_Florian_2016, title = "Visual Analytics for Rule-Based Quality Management of Multivariate Data", author = "Florian Spechtenhauser", year = "2016", abstract = "Ensuring an appropriate data quality is a critical topic when analyzing the ever increasing amounts of data collected and generated in today’s world. 
Depending on the given task, even sophisticated analysis methods may cause misleading results due to an insufficient quality of the data set at hand. In this case, automated plausibility checks based on defined rules are frequently used to detect data problems such as missing data or anomalies. However, defining such rules and using their results for an efficient data quality assessment is a challenging topic. Visualization is powerful to reveal unexpected problems in the data, and can additionally be used to validate results of applied automated plausibility checks. Visual Analytics closes the gap between automated data analysis and visualization by providing means to guide the definition and optimization of plausibility checks in order to use them for a continuous detection and validation of problems detected in the data. This diploma thesis provides a design study of a Visual Analytics approach, called Data Quality Overview, which provides a detailed, yet scalable summary of the results of defined plausibility checks, and includes means for validation and investigation of these results at various levels of detail. The approach is based on a detailed task analysis of data quality assessment, and is validated using a case study based on sensor data from the energy sector in addition to feedback collected from domain experts.", month = aug, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Spechtenhauser_Florian_2016/", } @misc{leimer-2016-rpe, title = "Relation-Based Parametrization and Exploration of Shape Collections", author = "Kurt Leimer and Michael Wimmer and Przemyslaw Musialski", year = "2016", abstract = "With online repositories for 3D models like 3D Warehouse becoming more prevalent and growing ever larger, new possibilities have opened up for both experienced and inexperienced users alike. These large collections of shapes can provide inspiration for designers or make it possible to synthesize new shapes by combining different parts from already existing shapes, which can be both easy to learn and a fast way of creating new shapes. But exploring large shape collections or searching for particular kinds of shapes can be difficult and time-consuming tasks as well, especially considering that online repositories are often disorganized. 
In our work, we propose a relation-based way to parametrize shape collections, allowing the user to explore the entire set of shapes by controlling a small number of parameters.", month = jul, publisher = "ACM", location = "Anaheim, CA, USA", isbn = "978-1-4503-4371-8", event = "ACM SIGGRAPH 2016", booktitle = "ACM SIGGRAPH 2016 Posters", Conference date = "Poster presented at ACM SIGGRAPH 2016 (2016-07-24--2016-07-28)", note = "34:1--34:1", pages = "34:1--34:1", keywords = "3D database exploration, shape analysis, shape collections", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/leimer-2016-rpe/", } @article{Kehl-Direct-2016, title = "Direct Image-to-Geometry Registration Using Mobile Sensor Data", author = "Christian Kehl and Simon Buckley and Robert Gawthorpe and Ivan Viola and John Anthony Howell", year = "2016", month = jul, journal = "ISPRS Annals of Photogrammetry, Remote Sensing and Spatial Information Sciences", volume = "III-2", doi = "10.5194/isprs-annals-III-2-121-2016", pages = "121--128", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Kehl-Direct-2016/", } @article{bernhard-2016-gft, title = "The Accuracy of Gauge-Figure Tasks in Monoscopic and Stereo Displays", author = "Matthias Bernhard and Manuela Waldner and Pascal Plank and Veronika Solteszova and Ivan Viola", year = "2016", abstract = "The gauge-figure task (GFT) is a widespread method used to study surface perception for evaluating rendering and visualization techniques. The authors investigate how accurately slant angles probed on well-defined objects align with the ground truth (GT) in monoscopic and stereoscopic displays. Their results show that the GFT probes taken with well-defined objects align well with the GT in the all-monoscopic and all-stereoscopic conditions. However, they found that a GF rendered in stereo over a monoscopic stimulus results in a strong slant underestimation and that an overestimation occurred in the inverse case (monoscopic GF and stereoscopic stimulus). They discuss how their findings affect the interpretation of absolute GFT measures, compared to the GT normal.", month = jul, journal = "IEEE Computer Graphics and Applications", number = "4", volume = "36", pages = "56--66", keywords = "computer graphics, gauge-figure task, perceptual visualization, shape perception", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/bernhard-2016-gft/", } @phdthesis{schmidt-phd, title = "Scalable Comparative Visualization", author = "Johanna Schmidt", year = "2016", abstract = "The comparison of two or more objects is becoming an increasingly important task in data analysis. Visualization systems successively have to move from representing one phenomenon to allowing users to analyze several datasets at once. Visualization systems can support the users in several ways. Firstly, comparison tasks can be supported in a very intuitive way by allowing users to place objects that should be compared in an appropriate context. Secondly, visualization systems can explicitly compute differences among the datasets and present the results to the user. In comparative visualization, researchers are working on new approaches for computer-supported techniques that provide data comparison functionality. Techniques from this research field can be used to compare two objects with each other, but often reach their limits if a multitude of objects (i.e., 100 or more) have to be compared. 
Large data collections that contain a lot of individual, but related, datasets with slightly different characteristics can be called ensembles. The individual datasets being part of an ensemble are called the ensemble members. Ensembles have been created in the simulation domain, especially for weather and climate research, for already quite some time. These domains were greatly driving the development of ensemble visualization techniques. Due to the availability of affordable computing resources and the multitude of different analysis algorithms (e.g., for segmentation), other domains nowadays also face similar problems. All together, this shows a great need for ensemble visualization techniques in various domains. Ensembles can either be analyzed in a feature-based or in a location-based way. In the case of a location-based analysis, the ensemble members are compared based on certain spatial data positions of interest. For such an analysis, local selection and analysis techniques for ensembles are needed. In the course of this thesis different visual analytics techniques for the comparative visualization of datasets have been researched. A special focus has been set on providing scalable techniques, which makes them also suitable for ensemble datasets. The proposed techniques operate on different dataset types in 2D and 3D. In the first part of the thesis, a visual analytics approach for the analysis of 2D image datasets is introduced. The technique analyzes localized differences in 2D images. The approach not only identifies differences in the data, but also provides a technique to quickly find out what the differences are, and judge upon the underlying data. This way patterns can be found in the data, and outliers can be identified very quickly. As a second part of the thesis, a scalable application for the comparison of several similar 3D mesh datasets is described. Such meshes may be, for example, created by point-cloud reconstruction algorithms, using different parameter settings. Similar to the proposed technique for the comparison of 2D images, this application is also scalable to a large number of individual datasets. The application enables the automatic comparison of the meshes, searches interesting regions in the data, and allows users to also concentrate on local regions of interest. The analysis of the local regions is in this case done in 3D. The application provides the possibility to arrange local regions in a parallel coordinates plot. The regions are represented by the axes in the plot, and the input meshes are depicted as polylines. This way it can be very quickly spotted whether meshes produce good/bad results in a certain local region. In the third and last part of the thesis, a technique for the interactive analysis of local regions in a volume ensemble dataset is introduced. Users can pick regions of interest, and these regions can be arranged in a graph according to their similarity. The graph can then be used to detect similar regions with a similar data distribution within the ensemble, and to compare individual ensemble members against the rest of the ensemble. All proposed techniques and applications have been tested with real-world datasets from different domains. 
The results clearly show the usefulness of the techniques for the comparative analysis of ensembles.", month = jun, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/schmidt-phd/", } @bachelorsthesis{Reimerth_Saskia_2016_RGU, title = "Redesigning the Graphical User Interface of an Application for Visualizing prenatal 3D Ultrasound Scans", author = "Saskia Reimerth", year = "2016", abstract = "The goal of this Bachelor’s thesis was to take an existing application for the representation of volume data and redesign the user interface for use as an application to represent 3D ultrasound scans of babies. Therefore, the original design was evaluated and reduced to the most important functions. A requirements analysis was conducted and, based on it, a modern and simple design was developed. After this iterative process, a prototype was created, which was then evaluated again.", month = jun, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Reimerth_Saskia_2016_RGU/", } @bachelorsthesis{Gadllah_Hani_2016, title = "Comparative Visualization of the Circle of Willis", author = "Hani Gadllah", year = "2016", abstract = "The human brain is supplied with blood by arteries that form a collateral circulation, the so-called Circle of Willis (CoW). The anatomy of the CoW varies considerably among the population. In fact, depending on the study, just 13% to 72% of the population has the CoW configuration typically illustrated in textbooks. Although divergent configurations are usually not pathological, some incomplete configurations increase the risk of stroke. Furthermore, studies suggest an association between certain neurological diseases and abnormal configurations of the CoW. Thus, for the diagnosis and treatment of diverse neurological diseases the assessment of the patient’s CoW is an important issue. This thesis addresses the development of a software tool for the comparative visualization of the CoWs of one population with the CoWs of a second population. For this purpose, an average CoW is calculated for each of the populations. The two resulting CoWs are then visualized side-by-side, so that the viewer is able to distinguish differences between the CoWs of the two populations with relatively little effort. The aim of this visualization is the support of studies that consider the clinical significance of the different CoW configurations as well as the support of diagnosis and treatment of diseases that are caused by an abnormal configuration of the CoW. The latter can be achieved by comparing the patient’s CoW with datasets of risk groups or with a dataset of a healthy population. ", month = jun, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Gadllah_Hani_2016/", } @phdthesis{karimov-2016-GIVE, title = "Guided Interactive Volume Editing in Medicine", author = "Alexey Karimov", year = "2016", abstract = "Various medical imaging techniques, such as Computed Tomography, Magnetic Resonance Imaging, and Ultrasonic Imaging, are now gold standards in the diagnosis of different diseases. 
The diagnostic process can be greatly improved with the aid of automatic and interactive analysis tools, which, however, require certain prerequisites in order to operate. Such analysis tools can, for example, be used for pathology assessment, various standardized measurements, treatment and operation planning. One of the major requirements of such tools is the segmentation mask of an object-of-interest. However, the segmentation of medical data remains subject to errors and mistakes. Often, physicians have to manually inspect and correct the segmentation results, as (semi-)automatic techniques do not immediately satisfy the required quality. To this end, interactive segmentation editing is an integral part of medical image processing and visualization. In this thesis, we present three advanced segmentation-editing techniques. They are focused on simple interaction operations that allow the user to edit segmentation masks quickly and effectively. These operations are based on a topology-aware representation that captures structural features of the segmentation mask of the object-of-interest. Firstly, in order to streamline the correction process, we classify segmentation defects according to underlying structural features and propose a correction procedure for each type of defect. This alleviates users from manually applying the proper editing operations, but the segmentation defects still have to be located by users. Secondly, we extend the basic editing process by detecting regions that potentially contain defects. With subsequently suggested correction scenarios, users are hereby immediately able to correct a specific defect, instead of manually searching for defects beforehand. For each suggested correction scenario, we automatically determine the corresponding region of the respective defect in the segmentation mask and propose a suitable correction operation. In order to create the correction scenarios, we detect dissimilarities within the data values of the mask and then classify them according to the characteristics of a certain type of defect. Potential findings are presented with a glyph-based visualization that facilitates users to interactively explore the suggested correction scenarios on different levels-of-detail. As a consequence, our approach even offers users the possibility to fine-tune the chosen correction scenario instead of directly manipulating the segmentation mask, which is a time-consuming and cumbersome task. Third and finally, we guide users through the multitude of suggested correction scenarios of the entire correction process. After statistically evaluating all suggested correction scenarios, we rank them according to their significance of dissimilarities, offering fine-grained editing capabilities at a user-specified level-of-detail. As we visually convey this ranking in a radial layout, users can easily spot and select the most (or the least) dissimilar correction scenario, which improves the segmentation mask mostly towards the desired result. All techniques proposed within this thesis have been evaluated by collaborating radiologists. We assessed the usability, interaction aspects, the accuracy of the results and the expenditure of time of the entire correction process. 
The outcome of the assessment showed that our guided volume editing not only leads to acceptable segmentation results with only a few interaction steps, but also is applicable to various application scenarios.", month = jun, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/karimov-2016-GIVE/", } @article{ortner-2016-tunnel, title = "Visual analytics and rendering for tunnel crack analysis", author = "Thomas Ortner and Johannes Sorger and Harald Piringer and Gerd Hesina and Eduard Gr\"{o}ller", year = "2016", abstract = "The visual analysis of surface cracks plays an essential role in tunnel maintenance when assessing the condition of a tunnel. To identify patterns of cracks, which endanger the structural integrity of its concrete surface, analysts need an integrated solution for visual analysis of geometric and multivariate data to decide if issuing a repair project is necessary. The primary contribution of this work is a design study, supporting tunnel crack analysis by tightly integrating geometric and attribute views to allow users a holistic visual analysis of geometric representations and multivariate attributes. Our secondary contribution is Visual Analytics and Rendering, a methodological approach which addresses challenges and recurring design questions in integrated systems. We evaluated the tunnel crack analysis solution in informal feedback sessions with experts from tunnel maintenance and surveying. We substantiated the derived methodology by providing guidelines and linking it to examples from the literature.", month = may, journal = "The Visual Computer", volume = "32", number = "6", pages = "859--869", keywords = "Integration of spatial and non-spatial data, Methodology, Visual analytics", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/ortner-2016-tunnel/", } @WorkshopTalk{mindek-2016-utah-talk, title = "Multi-Scale Molecular Data Visualization", author = "Peter Mindek", year = "2016", month = may, event = "QCB Workshop on Visualizing & Modeling Cell Biology", location = "Salt Lake City, Utah, USA", keywords = "molecular visualization, multiscale", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/mindek-2016-utah-talk/", } @techreport{TR1862162, title = "Visual Analysis of Volume Ensembles Based on Local Features", author = "Johanna Schmidt and Bernhard Fr\"{o}hler and Reinhold Preiner and Johannes Kehrer and Eduard Gr\"{o}ller and Stefan Bruckner and Christoph Heinzl", year = "2016", abstract = "Ensemble datasets describe a specific phenomenon (e.g., a simulation scenario or a measurements series) through a large set of individual ensemble members. These individual members typically do not differ too much from each other but rather feature slightly changing characteristics. In many cases, the ensemble members are defined in 3D space, which implies severe challenges when exploring the complete ensembles such as handling occlusions, focus and context or its sheer datasize. In this paper we address these challenges and put our focus on the exploration of local features in 3D volumetric ensemble datasets, not only by visualizing local characteristics, but also by identifying connections to other local features with similar characteristics in the data. 
We evaluate the variance in the dataset and use the spatial median (medoid) of the ensemble to visualize the differences in the dataset. This medoid is subsequently used as a representative of the ensemble in 3D. The variance information is used to guide users during the exploration, as regions of high variance also indicate larger changes within the ensemble members. The local characteristics of the regions can be explored by using our proposed 3D probing widgets. Each widget consists of a 3D sphere, which can be positioned at any point in 3D space. While moving a widget, the local data characteristics at the corresponding position are shown in a separate detail view, which depicts the local outliers and their surfaces in comparison to the medoid surface. The 3D probing widgets can also be fixed at a user-defined position of interest. The fixed probing widgets are arranged in a similarity graph to indicate similar local data characteristics. The similarity graph thus allows the user to explore whether high variances in a certain region are caused by the same dataset members or not. Finally, it is also possible to compare a single member against the rest of the ensemble. We evaluate our technique through two demonstration cases using volumetric multi-label segmentation mask datasets, two from the industrial domain and two from the medical domain.", month = may, number = "TR-186-2-16-2", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", note = "human contact: technical-report@cg.tuwien.ac.at", keywords = "ensemble visualization, guided local exploration, variance analysis", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/TR1862162/", } @talk{Groeller_2016_I3, title = "Understanding Data through Visual Exploration", author = "Eduard Gr\"{o}ller", year = "2016", month = may, event = "Keynote talk at Visionday 2016, Danmarks Tekniske Universitet (DTU Compute)", location = "Keynote talk at Visionday 2016, Danmarks Tekniske Universitet (DTU Compute)", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Groeller_2016_I3/", } @inproceedings{ilcik-2016-cescg, title = "20 Years of the Central European Seminar on Computer Graphics", author = "Martin Il\v{c}\'{i}k and Ivana Il\v{c}\'{i}kov\'{a} and Andrej Ferko and Michael Wimmer", year = "2016", abstract = "The Central European Seminar on Computer Graphics is an annual scientific seminar for undergraduate students of computer graphics, vision and visual computing. Its main mission is to promote graphics research and to motivate students to pursue academic careers. An international committee of experts guides their research work for several months. At the end, students present their results at a three-day seminar to an audience of approx. 100 students and professors. All attendants actively participate in discussions and workshops focused on academic skills and career planning for young researchers. 
Interactive sessions on innovation help them to identify the value of their ideas and motivate them to continue in their work.", month = may, publisher = "The Eurographics Association", location = "Lisbon", issn = "1017-4656", editor = "Beatriz Sousa Santos and Jean-Michel Dischler", booktitle = "Eurographics 2016 Education Papers", pages = "25--30", keywords = "Promotion of undergraduate research, Student seminar", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/ilcik-2016-cescg/", } @bachelorsthesis{Langer_Maximillian_DMV, title = "Dynamic Multiscale Vector Volumes", author = "Maximilian Langer", year = "2016", abstract = "Dynamic multiscale vector volumes is a solid representation based on signed distance functions to represent object boundaries. Multiscale vector volumes utilize a binary tree and an embedding mechanism to represent structures on different scales in a compact and efficient way. By extending the representation with an analytical formulation to partly replace signed distance functions, an efficient local animation of boundaries can be achieved. The representation uses a markup language for object definition that allows the user to create their own objects. The concept of dynamic multiscale vector volumes is implemented and tested in the Unity3D editor. The complete rendering is done on the graphics card.", month = apr, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Langer_Maximillian_DMV/", } @bachelorsthesis{schoerkhuber_dominik-2016-baa, title = "Fast KNN in Screenspace on GPGPU", author = "Dominik Sch\"{o}rkhuber", year = "2016", abstract = "Virtualization of real-world objects and scenes became very popular in recent years due to affordable laser-scanning technology. Nowadays it’s not only possible to capture static frames but also real-time frame sequences. Rendering of those captures is difficult because visually appealing renderings involve the computation of local surface reconstruction from point clouds and therefore a lot of preprocessing. This is usually not possible in real time. One important processing step is the computation of nearest neighbours for each 3D point. The neighbourhood information is not only used for normal reconstruction and local surface estimation, but can also be utilized for collision detection. In this paper we present a method for computing the k-nearest-neighbor sets for point clouds in real time. To achieve high frame rates we parallelize the algorithm on the GPU, using the Nvidia CUDA parallel computation framework. Furthermore, computations are limited to operate in screen space, to reduce computational complexity even further and to effectively prevent rendering invisible geometry. 
We also utilize the proposed FastKnn algorithm to estimate local surface reconstruction for splat rendering of point clouds in real time and show how it compares to a state-of-the-art algorithm.", month = apr, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "cuda, gpu, nearest neighbor search, knn, screen space", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/schoerkhuber_dominik-2016-baa/", } @misc{klein-2016-WCL, title = "Towards Interactive Visual Exploration of Parallel Programs using a Domain-Specific Language", author = "Tobias Klein and Stefan Bruckner and Eduard Gr\"{o}ller and Markus Hadwiger and Peter Rautek", year = "2016", abstract = "The use of GPUs and the massively parallel computing paradigm have become widespread. We describe a framework for the interactive visualization and visual analysis of the run-time behavior of massively parallel programs, especially OpenCL kernels. This facilitates understanding a program's function and structure, finding the causes of possible slowdowns, locating program bugs, and interactively exploring and visually comparing different code variants in order to improve performance and correctness. Our approach enables very specific, user-centered analysis, both in terms of the recording of the run-time behavior and the visualization itself. Instead of having to manually write instrumented code to record data, simple code annotations tell the source-to-source compiler which code instrumentation to generate automatically. The visualization part of our framework then enables the interactive analysis of kernel run-time behavior in a way that can be very specific to a particular problem or optimization goal, such as analyzing the causes of memory bank conflicts or understanding an entire parallel algorithm.", month = apr, publisher = "ACM", location = "Vienna, Austria", event = "4th International Workshop on OpenCL (IWOCL '16)", Conference date = "Poster presented at 4th International Workshop on OpenCL (IWOCL '16)", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/klein-2016-WCL/", } @techreport{karimov-2016-SD, title = "Statistics-Driven Localization of Dissimilarities in Data", author = "Alexey Karimov and Gabriel Mistelbauer and Thomas Auzinger and Eduard Gr\"{o}ller", year = "2016", abstract = "The identification of dissimilar regions in spatial and temporal data is a fundamental part of data exploration. This process takes place in applications such as biomedical image processing as well as climatic data analysis. We propose a general solution for this task by employing well-founded statistical tools. From a large set of candidate regions, we derive an empirical distribution of the data and perform statistical hypothesis testing to obtain p-values as measures of dissimilarity. Having p-values, we quantify differences and rank regions on a global scale according to their dissimilarity to user-specified exemplar regions. We demonstrate our approach and its generality with two application scenarios, namely interactive exploration of climatic data and segmentation editing in the medical domain. In both cases our data exploration protocol unifies the interactive data analysis, guiding the user towards regions with the most relevant dissimilarity characteristics. 
The dissimilarity analysis results are conveyed with a radial tree, which prevents the user from searching exhaustively through all the data.", month = apr, number = "TR-186-2-16-1", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", note = "human contact: technical-report@cg.tuwien.ac.at", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/karimov-2016-SD/", } @talk{Groeller_2016_I1, title = "The Certainly Uncertain Uncertainty Talk", author = "Eduard Gr\"{o}ller", year = "2016", month = apr, event = "Second Workshop on Uncertainty, Technische Universit\"{a}t M\"{u}nchen, Informatik 15 (Computer Graphik & Visualisierung)", location = "Second Workshop on Uncertainty, Technische Universit\"{a}t M\"{u}nchen, Informatik 15 (Computer Graphik & Visualisierung)", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Groeller_2016_I1/", } @talk{Groeller_2016_I2, title = "Visual Computing and Comparative Visualization", author = "Eduard Gr\"{o}ller", year = "2016", month = apr, event = "Invited Talk", location = "Czech Technical University, Faculty of Electrical Engineering, Department of Computer Graphics and Interaction", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Groeller_2016_I2/", } @bachelorsthesis{Oancea_Stefan_2016_VOT, title = "Variance Orientation Transform Detection of Early Osteoarthritis in Knee Trabecular Bone", author = "Stefan Ovidiu Oancea", year = "2016", abstract = "Since the fractal properties of the knee trabecular bone were discovered, fractal methods for analyzing bone surface radiographic projections have gained more attention. This is partly due to the fact that radiography is the cheapest imaging technique in routine clinical screening and partly due to the fact that the trabecular bones of osteoarthritic patients were shown to indicate early deformations, even long before the characteristic joint loss occurs. The ultimate goal of such an algorithm would be to differentiate healthy from unhealthy trabecular bone. This paper presents a report of our implementation of the Variance Orientation Transform (VOT) algorithm, a fractal method which, unlike other similar methods, is able to quantify bone texture in different directions and over different scales of measurement. It is based on the idea that a single fractal dimension value is not enough to describe such a complex structure as the trabecular bone and thus VOT calculates more descriptive fractal dimensions called fractal signatures (FSs). In Chapters 1 and 2 we introduce the notion of fractals and the theoretical background behind them and the VOT algorithm. In Chapter 3 similar techniques for analyzing trabecular bone are presented and in Chapter 4 our particular attempt at implementing VOT is described in detail; moreover, in the same Chapter VOT is validated using some artificially generated fractal surfaces and the ability of differentiating healthy and affected bone is also investigated. 
The last Chapter, Chapter 5, covers further possible ideas for improving and testing the algorithm.", month = mar, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Oancea_Stefan_2016_VOT/", } @mastersthesis{Kroesl_Katharina_2016_PPT, title = "Interactive, Progressive Photon Tracing using a Multi-Resolution Image-Filtering Approach", author = "Katharina Kr\"{o}sl", year = "2016", abstract = "Modern workflows in architectural planning and lighting design require physically reliable lighting simulations for detailed and complex 3D models. Current workflows for luminaire design and lighting design are not tailored to each other. During luminaire design, CAD programs are used to create 3D models of luminaires, and offline rendering tools are used to visualize the light distribution. In lighting design, light concepts are explored by placing light sources - previously created during luminaire design - in a 3D scene using an interactive light-planning software, but it is not possible to modify the light sources themselves. This thesis presents an interactive global-illumination algorithm to simulate the light distribution of a luminaire. The algorithm produces visually pleasing intermediate results at interactive frame rates, before converging to a physically plausible solution that can be imported as a representation of a light source into a light-planning software. We combine an interactive, progressive photon-tracing algorithm with a multi-resolution image-filtering approach. Our algorithm iteratively emits photons into a 3D scene containing the model of a luminaire and progressively refines results. We use mipmaps to create a multi-resolution approach and incorporate image-filtering techniques to obtain visually pleasing intermediate results. Evaluations based on objective quality metrics show that the presented image-filtering approach increases image quality when compared to non-filtered results. The proposed algorithm provides fast previews and allows interactive modifications of the geometry and material properties of the luminaire in real time. This reduces the time between modification iterations and therefore turns luminaire design into an interactive process that reduces overall production time. Furthermore, the presented approach integrates luminaire design into lighting design and therefore provides a new way to combine two formerly decoupled workflows.", month = mar, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "photon tracing, luminaire design, lighting design", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Kroesl_Katharina_2016_PPT/", } @mastersthesis{prieler_daniel-2013-da, title = "Real-time Meshing for Noisy Points", author = "Daniel Prieler", year = "2016", abstract = "The increasing availability of 3D scanning devices in both industrial and entertainment environments (e.g., Microsoft Kinect) creates a demand for fast and reliable resampling and reconstruction techniques. Point clouds, especially raw range images, are often non-uniformly sampled and subject to non-uniform noise levels. Current state-of-the-art techniques often require user-provided parameters that estimate the noise level of the point cloud. 
This produces sub-optimal results for point sets with varying noise extent. We propose an isotropically fair neighborhood definition which is specifically designed to address non-uniformly sampled point clouds. Our iterative point cloud resampling method estimates and adapts to the local noise level at each sample. This increases the reconstruction quality for point clouds with high noise levels while being completely parameter free. The data structures built during the resampling process are reused to speed up the process of creating a consistent normal orientation. Evaluation of the resampling quality shows that our technique outperforms current state-of-the-art methods for varying noise levels and non-uniform sampling. Both the resampling algorithm and the subsequent consistent normal orientation operate locally and can be implemented efficiently in parallel. Our GPU sphere regression implementation outperforms the standard sequential procedure by a factor of 20.", month = feb, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "surface fitting, surface reconstruction, noise, meshing, real-time, CUDA", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/prieler_daniel-2013-da/", } @article{arikan-2015-dmrt, title = "Multi-Depth-Map Raytracing for Efficient Large-Scene Reconstruction", author = "Murat Arikan and Reinhold Preiner and Michael Wimmer", year = "2016", abstract = "With the enormous advances of the acquisition technology over the last years, fast processing and high-quality visualization of large point clouds have gained increasing attention. Commonly, a mesh surface is reconstructed from the point cloud and a high-resolution texture is generated over the mesh from the images taken at the site to represent surface materials. However, this global reconstruction and texturing approach becomes impractical with increasing data sizes. Recently, due to its potential for scalability and extensibility, a method for texturing a set of depth maps in a preprocessing step and stitching them at runtime has been proposed to represent large scenes. However, the rendering performance of this method is strongly dependent on the number of depth maps and their resolution. Moreover, for the proposed scene representation, every single depth map has to be textured by the images, which in practice heavily increases processing costs. In this paper, we present a novel method to break these dependencies by introducing an efficient raytracing of multiple depth maps. In a preprocessing phase, we first generate high-resolution textured depth maps by rendering the input points from image cameras and then perform a graph-cut based optimization to assign a small subset of these points to the images. At runtime, we use the resulting point-to-image assignments (1) to identify for each view ray which depth map contains the closest ray-surface intersection and (2) to efficiently compute this intersection point. 
The resulting algorithm accelerates both the texturing and the rendering of the depth maps by an order of magnitude.", month = feb, doi = "10.1109/TVCG.2015.2430333", issn = "1077-2626", journal = "IEEE Transactions on Visualization and Computer Graphics", number = "2", volume = "22", pages = "1127--1137", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/arikan-2015-dmrt/", } @article{ortner-2016-visaware, title = "Vis-a-ware: Integrating spatial and non-spatial visualization for visibility-aware urban planning", author = "Thomas Ortner and Johannes Sorger and Harald Steinlechner and Gerd Hesina and Harald Piringer and Eduard Gr\"{o}ller", year = "2016", abstract = "3D visibility analysis plays a key role in urban planning for assessing the visual impact of proposed buildings on the cityscape. A call for proposals typically yields around 30 candidate buildings that need to be evaluated with respect to selected viewpoints. Current visibility analysis methods are very time-consuming and limited to a small number of viewpoints. Further, analysts neither have measures to evaluate candidates quantitatively, nor to compare them efficiently. The primary contribution of this work is the design study of Vis-A-Ware, a visualization system to qualitatively and quantitatively evaluate, rank, and compare visibility data of candidate buildings with respect to a large number of viewpoints. Vis-A-Ware features a 3D spatial view of an urban scene and non-spatial views of data derived from visibility evaluations, which are tightly integrated by linked interaction. To enable a quantitative evaluation we developed four metrics in accordance with experts from urban planning. We illustrate the applicability of Vis-A-Ware on the basis of a use case scenario and present results from informal feedback sessions with domain experts from urban planning and development. This feedback suggests that Vis-A-Ware is a valuable tool for visibility analysis allowing analysts to answer complex questions more efficiently and objectively.", month = jan, journal = "IEEE Transactions on Visualization and Computer Graphics", issn = "1077-2626", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/ortner-2016-visaware/", } @article{Viola_Ivan_2015_AAM, title = "AnimoAminoMiner: Exploration of Protein Tunnels and their Properties in Molecular Dynamics", author = "Jan Byska and Mathieu Le Muzic and Eduard Gr\"{o}ller and Ivan Viola and Barbora Kozlikova", year = "2016", abstract = "In this paper we propose a novel method for the interactive exploration of protein tunnels. The basic principle of our approach is that we entirely abstract from the 3D/4D space the simulated phenomenon is embedded in. A complex 3D structure and its curvature information are represented only by a straightened tunnel centerline and its width profile. This representation focuses on a key aspect of the studied geometry and frees up graphical real estate for key chemical and physical properties represented by surrounding amino acids. The method shows the detailed tunnel profile and its temporal aggregation. The profile is interactively linked with a visual overview of all amino acids which are lining the tunnel over time. In this overview, each amino acid is represented by a set of colored lines depicting the spatial and temporal impact of the amino acid on the corresponding tunnel. This representation clearly shows the importance of amino acids with respect to selected criteria. 
It helps biochemists to select candidate amino acids for mutation, which changes the protein function in a desired way. The AnimoAminoMiner was designed in close cooperation with domain experts. Its usefulness is documented by their feedback and a case study, which are included.", month = jan, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "22", number = "1", issn = "1077-2626", pages = "747--756", keywords = "aggregation, molecular dynamics, Protein, interaction, tunnel", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Viola_Ivan_2015_AAM/", } @article{sorger-2015-litevis, title = "LiteVis: Integrated Visualization for Simulation-Based Decision Support in Lighting Design", author = "Johannes Sorger and Thomas Ortner and Christian Luksch and Michael Schw\"{a}rzler and Eduard Gr\"{o}ller and Harald Piringer", year = "2016", abstract = "State-of-the-art lighting design is based on physically accurate lighting simulations of scenes such as offices. The simulation results support lighting designers in the creation of lighting configurations, which must meet contradicting customer objectives regarding quality and price while conforming to industry standards. However, current tools for lighting design impede rapid feedback cycles. On the one hand, they decouple analysis and simulation specification. On the other hand, they lack capabilities for a detailed comparison of multiple configurations. The primary contribution of this paper is a design study of LiteVis, a system for efficient decision support in lighting design. LiteVis tightly integrates global illumination-based lighting simulation, a spatial representation of the scene, and non-spatial visualizations of parameters and result indicators. This enables an efficient iterative cycle of simulation parametrization and analysis. Specifically, a novel visualization supports decision making by ranking simulated lighting configurations with regard to a weight-based prioritization of objectives that considers both spatial and non-spatial characteristics. In the spatial domain, novel concepts support a detailed comparison of illumination scenarios. We demonstrate LiteVis using a real-world use case and report qualitative feedback of lighting designers. This feedback indicates that LiteVis successfully supports lighting designers to achieve key tasks more efficiently and with greater certainty.", month = jan, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "22", number = "1", issn = "1077-2626", pages = "290--299", keywords = "Integrating Spatial and Non-Spatial Data", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/sorger-2015-litevis/", } @mastersthesis{Labschuetz_Matthias_2016_AHD, title = "An Adaptive, Hybrid Data Structure for Sparse Volume Data on the GPU", author = "Matthias Labsch\"{u}tz", year = "2016", abstract = "Dealing with large, sparse volume data on the GPU is a necessity in many applications such as volume rendering, processing or simulation. The limited memory budget of modern GPUs restricts users from uploading large volume data sets entirely. Fortunately, sparse data, i.e., data containing large empty regions, can be represented more efficiently compared to a common dense array. Our approach makes it possible to upload a full data set even if the original volume does not fit on the GPU. In previous work, a variety of sparse data structures have been utilized on the GPU, each with different properties. 
Tree representations, such as the octree, kd-tree or N3 tree, provide a hierarchical solution for data sets of relatively low sparsity. For data sets of medium sparsity, spatial hashing makes more efficient access and storage possible. Extremely sparse data can be efficiently represented and accessed via binary search in sorted voxel lists. Our observation is that data sets often contain regions of different sparsity. Depending on the sparsity of a region, a specific data structure (e.g., an octree, a voxel list) requires the least memory to store the data. We formulate an algorithm that is able to automatically find this memory-optimal representation. By using such a combination of different data structures, we achieve an even better representation than any single data structure for real-world data sets. We call such a data structure a hybrid data structure. Any sparse data structure introduces an access overhead. For example, the access to an octree requires one additional indirection per height level of the tree. A voxel list has to be searched to retrieve a specific element. By using a hybrid data structure, we also introduce an access overhead on top of the overhead that comes from using a sparse data structure. In our work we introduce JiTTree, which utilizes a data-aware just-in-time compilation step to improve the access performance of our hybrid data structure. We show that the implementation of our hybrid data structure effectively reduces the memory requirement of sparse data sets. JiTTree can improve the performance of hybrid bricking for certain access patterns such as stencil accesses.", month = jan, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Labschuetz_Matthias_2016_AHD/", } @article{Labschuetz_Matthias_2016_JITT, title = "JiTTree: A Just-in-Time Compiled Sparse GPU Volume Data Structure", author = "Matthias Labsch\"{u}tz and Stefan Bruckner and Eduard Gr\"{o}ller and Markus Hadwiger and Peter Rautek", year = "2016", abstract = "Sparse volume data structures enable the efficient representation of large but sparse volumes in GPU memory for computation and visualization. However, the choice of a specific data structure for a given data set depends on several factors, such as the memory budget, the sparsity of the data, and data access patterns. In general, there is no single optimal sparse data structure, but a set of several candidates with individual strengths and drawbacks. One solution to this problem are hybrid data structures which locally adapt themselves to the sparsity. However, they typically suffer from increased traversal overhead which limits their utility in many applications. This paper presents JiTTree, a novel sparse hybrid volume data structure that uses just-in-time compilation to overcome these problems. By combining multiple sparse data structures and reducing traversal overhead we leverage their individual advantages. We demonstrate that hybrid data structures adapt well to a large range of data sets. They are especially superior to other sparse data structures for data sets that locally vary in sparsity. Possible optimization criteria are memory, performance and a combination thereof. Through just-in-time (JIT) compilation, JiTTree reduces the traversal overhead of the resulting optimal data structure. 
As a result, our hybrid volume data structure enables efficient computations on the GPU, while being superior in terms of memory usage when compared to non-hybrid data structures.", month = jan, issn = "1077-2626", journal = "IEEE Transactions on Visualization and Computer Graphics", note = "Published in January 2016", number = "1", volume = "22", event = "IEEE SciVis 2015", location = "Chicago, IL, USA", pages = "1025--1034", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Labschuetz_Matthias_2016_JITT/", } @talk{intel2016, title = "Real-time Subsurface Scattering, Light Transport and Two Minute Papers", author = "Karoly Zsolnai-Feh\'{e}r", year = "2016", abstract = "K\'{a}roly Zsolnai-Feh\'{e}r is a PhD student at the Technical University of Vienna. He is going to talk about Separable Subsurface Scattering, his recent collaboration with Activision Blizzard and the University of Zaragoza to render subsurface scattering in real time on the GPU for computer games. Next, he will transition to global illumination and explain a simple extension to Metropolis Light Transport to improve the convergence speed on a variety of scenes. The third part will be about Two Minute Papers, a YouTube web series that he started recently to communicate the most beautiful research results to a general audience.", event = "Intel Graphics Architecture Forum", location = "Intel Advanced Rendering Technology group", keywords = "global illumination, light transport, subsurface scattering, two minute papers", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/intel2016/", } @habilthesis{viola-evr, title = "Effective Visual Representations", author = "Ivan Viola", year = "2016", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/viola-evr/", } @article{malan_fluoro, title = "A fluoroscopy-based planning and guidance software tool for minimally invasive hip refixation by cement injection.", author = "DF Malan and SJ van der Walt and Renata Raidou and B van den Berg and BC Stoel and CP Botha and RG Nelissen and ER Valstar", year = "2016", abstract = "PURPOSE: In orthopaedics, minimally invasive injection of bone cement is an established technique. We present HipRFX, a software tool for planning and guiding a cement injection procedure for stabilizing a loosening hip prosthesis. HipRFX works by analysing a pre-operative CT and intraoperative C-arm fluoroscopic images. METHODS: HipRFX simulates the intraoperative fluoroscopic views that a surgeon would see on a display panel. Structures are rendered by modelling their X-ray attenuation. These are then compared to actual fluoroscopic images which allow cement volumes to be estimated. Five human cadaver legs were used to validate the software in conjunction with real percutaneous cement injection into artificially created periprosthetic lesions. RESULTS: Based on intraoperatively obtained fluoroscopic images, our software was able to estimate the cement volume that reached the pre-operatively planned targets. The actual median target lesion volume was 3.58 ml (range 3.17-4.64 ml). The median error in computed cement filling, as a percentage of target volume, was 5.3% (range 2.2-14.8%). Cement filling was between 17.6 and 55.4% (median 51.8%). CONCLUSIONS: As a proof of concept, HipRFX was capable of simulating intraoperative fluoroscopic C-arm images. Furthermore, it provided estimates of the fraction of injected cement deposited at its intended target location, as opposed to cement that leaked away. 
This level of knowledge is usually unavailable to the surgeon viewing a fluoroscopic image and may aid in evaluating the success of a percutaneous cement injection intervention.", journal = "International Journal of Computer Assisted Radiology and Surgery", number = "2", volume = "11", pages = "281--296", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/malan_fluoro/", } @article{vad-2016-bre, title = "Generalized box-plot for root growth ensembles", author = "Viktor Vad and Douglas Cedrim and Wolfgang Busch and Peter Filzmoser and Ivan Viola", year = "2016", abstract = "Background: In the field of root biology there has been remarkable progress in root phenotyping, which is the efficient acquisition and quantitative description of root morphology. What is currently missing are means to efficiently explore, exchange and present the massive amount of acquired, and often time-dependent, root phenotypes. Results: In this work, we present visual summaries of root ensembles by aggregating root images with identical genetic characteristics. We use the generalized box plot concept with a new formulation of data depth. In addition to spatial distributions, we created a visual representation to encode temporal distributions associated with the development of root individuals. Conclusions: The new formulation of data depth allows for a much faster implementation close to interactive frame rates. This allows us to present the statistics from bootstrapping that characterize the root sample set quality. As a positive side effect of the new data-depth formulation we are able to define the geometric median for the curve ensemble, which was well received by the domain experts.", journal = "BMC Bioinformatics", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/vad-2016-bre/", } @article{Cornel2016CFM, title = "Composite Flow Maps", author = "Daniel Cornel and Artem Konev and Bernhard Sadransky and Zsolt Horvath and Andrea Brambilla and Ivan Viola and J\"{u}rgen Waser", year = "2016", abstract = "Flow maps are widely used to provide an overview of geospatial transportation data. Existing solutions lack the support for the interactive exploration of multiple flow components at once. Flow components are given by different materials being transported, different flow directions, or by the need for comparing alternative scenarios. In this paper, we combine flows as individual ribbons in one composite flow map. The presented approach can handle an arbitrary number of sources and sinks. To avoid visual clutter, we simplify our flow maps based on a force-driven algorithm, accounting for restrictions with respect to application semantics. The goal is to preserve important characteristics of the geospatial context. This feature also enables us to highlight relevant spatial information on top of the flow map such as traffic conditions or accessibility. The flow map is computed on the basis of flows between zones. We describe a method for auto-deriving zones from geospatial data according to application requirements. We demonstrate the method in real-world applications, including transportation logistics, evacuation procedures, and water simulation. 
Our results are evaluated with experts from corresponding fields.", journal = "Computer Graphics Forum", volume = "35", number = "3", pages = "461--470", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Cornel2016CFM/", } @article{Krone2016VABC, title = "Visual Analysis of Biomolecular Cavities: State of the Art", author = "Michael Krone and Barbora Kozlikova and Norbert Lindow and Marc Baaden and Daniel Baum and Julius Parulek and Hans-Christian Hege and Ivan Viola", year = "2016", abstract = "In this report we review and structure the branch of molecular visualization that is concerned with the visual analysis of cavities in macromolecular protein structures. First the necessary background, the domain terminology, and the goals of analytical reasoning are introduced. Based on a comprehensive collection of relevant research works, we present a novel classification for cavity detection approaches and structure them into four distinct classes: grid-based, Voronoi-based, surface-based, and probe-based methods. The subclasses are then formed by their combinations. We match these approaches with corresponding visualization technologies starting with direct 3D visualization, followed with non-spatial visualization techniques that for example abstract the interactions between structures into a relational graph, straighten the cavity of interest to see its profile in one view, or aggregate the time sequence into a single contour plot. We also discuss the current state of methods for the visual analysis of cavities in dynamic data such as molecular dynamics simulations. Finally, we give an overview of the most common tools that are actively developed and used in structural biology and biochemistry research. Our report is concluded by an outlook on future challenges in the field.", journal = "Computer Graphics Forum", volume = "35", number = "3", pages = "527--551", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Krone2016VABC/", } @inproceedings{Reisacher2016, title = "CellPathway: A Simulation Tool for Illustrative Visualization of Biochemical Networks", author = "Matthias Reisacher and Mathieu Le Muzic and Ivan Viola", year = "2016", abstract = "The molecular knowledge about complex biochemical reaction networks in biotechnology is crucial and has received a lot of attention lately. As a consequence, multiple visualization programs have already been developed to illustrate the anatomy of a cell. However, since a real cell performs millions of reactions every second to sustain life, it is necessary to move from anatomical to physiological illustrations to communicate knowledge about the behavior of a cell more accurately. In this work I propose a reaction system including a collision detection algorithm, which is able to work at the level of single atoms, to enable precise simulation of molecular interactions. To visually explain molecular activities during the simulation process, a real-time glow effect in combination with a clipping object has been implemented. Since intracellular processes are performed with a set of chemical transformations, a hierarchical structure is used to illustrate the impact of one reaction on the entire simulation. The CellPathway system integrates acceleration techniques to render large datasets containing millions of atoms in real time, while the reaction system is processed directly on the GPU to enable simulation with more than 1000 molecules. 
Furthermore, a graphical user interface has been implemented to allow the user to control parameters during simulation interactively.", location = "Pilsen, Czech Republic", booktitle = "Proceedings of WSCG", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Reisacher2016/", } @article{miao_2016_cgf, title = "Visual Quantification of the Circle of Willis: An Automated Identification and Standardized Representation", author = "Haichao Miao and Gabriel Mistelbauer and Christian Nasel and Eduard Gr\"{o}ller", year = "2016", abstract = "This paper presents a method for the visual quantification of cerebral arteries, known as the Circle of Willis (CoW). It is an arterial structure with the responsibility of supplying the brain with blood; however, dysfunctions can lead to strokes. The diagnosis of such a time-critical/urgent event depends on the expertise of radiologists and the applied software tools. They use basic display methods of the volumetric data without any support of advanced image processing and visualization techniques. The goal of this paper is to present an automated method for the standardized description of cerebral arteries in stroke patients in order to provide an overview of the CoW's configuration. This novel representation provides visual indications of problematic areas as well as straightforward comparisons between multiple patients. Additionally, we offer a pipeline for extracting the CoW from Time-of-Flight Magnetic Resonance Angiography (TOF-MRA) data sets together with an enumeration technique for labelling the arterial segments by detecting the main supplying arteries of the CoW. We evaluated the feasibility of our visual quantification approach in a study of 63 TOF-MRA data sets and compared our findings to those of three radiologists. The obtained results demonstrate that our proposed techniques are effective in detecting the arteries and visually capturing the overall configuration of the CoW.", issn = "1467-8659", journal = "Computer Graphics Forum", keywords = "Circle of Willis, medical visualization, information visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/miao_2016_cgf/", } @article{Solteszova2016, title = "Output-Sensitive Filtering of Streaming Volume Data", author = "Veronika Solteszova and {\AA}smund Birkeland and Sergej Stoppel and Ivan Viola and Stefan Bruckner", year = "2016", abstract = "Real-time volume data acquisition poses substantial challenges for the traditional visualization pipeline where data enhancement is typically seen as a pre-processing step. In the case of 4D ultrasound data, for instance, costly processing operations to reduce noise and to remove artefacts need to be executed for every frame. To enable the use of high-quality filtering operations in such scenarios, we propose an output-sensitive approach to the visualization of streaming volume data. Our method evaluates the potential contribution of all voxels to the final image, allowing us to skip expensive processing operations that have little or no effect on the visualization. As filtering operations modify the data values which may affect the visibility, our main contribution is a fast scheme to predict their maximum effect on the final image. Our approach prioritizes filtering of voxels with high contribution to the final visualization based on a maximal permissible error per pixel. With zero permissible error, the optimized filtering will yield a result that is identical to filtering of the entire volume. 
We provide a thorough technical evaluation of the approach and demonstrate it on several typical scenarios that require on-the-fly processing.", journal = "Computer Graphics Forum", volume = "35", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Solteszova2016/", } @article{Reichinger_2016, title = "Gesture-Based Interactive Audio Guide on Tactile Reliefs", author = "Andreas Reichinger and Stefan Maierhofer and Anton Fuhrmann and Werner Purgathofer", year = "2016", abstract = "For blind and visually impaired people, tactile reliefs offer many benefits over the more classic raised line drawings or tactile diagrams, as depth, 3D shape and surface textures are directly perceivable. However, without proper guidance some reliefs are still difficult to explore autonomously. In this work, we present a gesture-controlled interactive audio guide (IAG) based on recent low-cost depth cameras that operates directly on relief surfaces. The interactively explorable, location-dependent verbal descriptions promise rapid tactile accessibility to 2.5D spatial information in a home or education setting, to on-line resources, or as a kiosk installation at public places. We present a working prototype, discuss design decisions and present the results of two evaluation sessions with a total of 20 visually impaired test users.", month = oct, journal = "Proceedings of the 18th International ACM SIGACCESS Conference on Computers & Accessibility", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Reichinger_2016/", } @article{raidou_miccai16, title = "Employing Visual Analytics to Aid the Design of White Matter Hyperintensity Classifiers.", author = "Renata Raidou and Hugo J. Kuijf and Neda Sepasian and Nicola Pezzotti and Willem H. Bouvy and Marcel Breeuwer and Anna Vilanova i Bartroli", year = "2016", abstract = "Accurate segmentation of brain white matter hyperintensities (WMHs) is important for prognosis and disease monitoring. To this end, classifiers are often trained, usually using T1 and FLAIR weighted MR images. Incorporating additional features, derived from diffusion weighted MRI, could improve classification. However, the multitude of diffusion-derived features requires selecting the most adequate. For this, automated feature selection is commonly employed, which can often be sub-optimal. In this work, we propose a different approach, introducing a semi-automated pipeline to interactively select features for WMH classification. The advantage of this solution is the integration of the knowledge and skills of experts in the process. In our pipeline, a Visual Analytics (VA) system is employed to enable user-driven feature selection. The resulting features are T1, FLAIR, Mean Diffusivity (MD), and Radial Diffusivity (RD), and secondarily, CS and Fractional Anisotropy (FA). The next step in the pipeline is to train a classifier with these features, and compare its results to a similar classifier, used in previous work with automated feature selection. Finally, VA is employed again, to analyze and understand the classifier performance and results.", journal = "Proceedings of the International Conference on Medical Image Computing and Computer Assisted Intervention (MICCAI)", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/raidou_miccai16/", } @article{Groeller_2016_P1, title = "Visual Analysis of Defects in Glass Fiber Reinforced Polymers for 4DCT Interrupted In situ Tests", author = "Aleksandr Amirkhanov and Artem Amirkhanov and Dietmar Salaberger and Johannes Kastner and Eduard Gr\"{o}ller and Christoph Heinzl", year = "2016", abstract = "Material engineers use interrupted in situ tensile testing to investigate the damage mechanisms in composite materials. For each subsequent scan, the load is incrementally increased until the specimen is completely fractured. During the interrupted in situ testing of glass fiber reinforced polymers (GFRPs), defects of four types are expected to appear: matrix fracture, fiber/matrix debonding, fiber pull-out, and fiber fracture. There is a growing demand for the detection and analysis of these defects among material engineers. In this paper, we present a novel workflow for the detection, classification, and visual analysis of defects in GFRPs using interrupted in situ tensile tests in combination with X-ray Computed Tomography. The workflow is based on the automatic extraction of defects and fibers. We introduce the automatic Defect Classifier assigning the most suitable type to each defect based on its geometrical features. We present a visual analysis system that integrates four visualization methods: 1) the Defect Viewer highlights defects with visually encoded type in the context of the original CT image, 2) the Defect Density Maps provide an overview of the defect distributions according to type in 2D and 3D, 3) the Final Fracture Surface estimates the material fracture’s location and displays it as a 3D surface, 4) the 3D Magic Lens enables interactive exploration by combining detailed visualizations in the region of interest with overview visualizations as context. In collaboration with material engineers, we evaluate our solution and demonstrate its practical applicability.", journal = "Computer Graphics Forum", volume = "35", number = "3", issn = "1467-8659", doi = "10.1111/cgf.12896", pages = "201--210", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Groeller_2016_P1/", } @article{Groeller_2016_P2, title = "Towards Quantitative Visual Analytics with Structured Brushing and Linked Statistics", author = "Sanjin Rados and Rainer Splechtna and Kresimir Matkovic and Mario Duras and Eduard Gr\"{o}ller and Helwig Hauser", year = "2016", abstract = "Until now, a lot of visual analytics has predominantly delivered qualitative results, based, for example, on a continuous color map or a detailed spatial encoding. Important target applications, however, such as medical diagnosis and decision making, clearly benefit from quantitative analysis results. In this paper we propose several specific extensions to the well-established concept of linking&brushing in order to make the analysis results more quantitative. We structure the brushing space in order to improve the reproducibility of the brushing operation, e.g., by introducing the percentile grid. We also enhance the linked visualization with overlaid descriptive statistics to enable a more quantitative reading of the resulting focus+context visualization. 
Additionally, we introduce two novel brushing techniques: the percentile brush and the Mahalanobis brush. Both use the underlying data to support statistically meaningful interactions with the data. We illustrate the use of the new techniques in the context of two case studies, one based on meteorological data and the other focused on data from the automotive industry, where we evaluate a shaft design in the context of mechanical power transmission in cars.", journal = "Computer Graphics Forum", volume = "35", number = "3", pages = "251--260", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Groeller_2016_P2/", } @article{Groeller_2016_P3, title = "State of the Art in Transfer Functions for Direct Volume Rendering", author = "P. Ljung and J. Kr\"{u}ger and Eduard Gr\"{o}ller and Markus Hadwiger and C. Hansen and Anders Ynnerman", year = "2016", abstract = "A central topic in scientific visualization is the transfer function (TF) for volume rendering. The TF serves a fundamental role in translating scalar and multivariate data into color and opacity to express and reveal the relevant features present in the data studied. Beyond this core functionality, TFs also serve as a tool for encoding and utilizing domain knowledge and as an expression for visual design of material appearances. TFs also enable interactive volumetric exploration of complex data. The purpose of this state-of-the-art report (STAR) is to provide an overview of research into the various aspects of TFs, which lead to interpretation of the underlying data through the use of meaningful visual representations. The STAR classifies TF research into the following aspects: dimensionality, derived attributes, aggregated attributes, rendering aspects, automation, and user interfaces. The STAR concludes with some interesting research challenges that form the basis of an agenda for the development of next generation TF tools and methodologies.", journal = "Computer Graphics Forum", volume = "35", number = "3", pages = "669--691", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Groeller_2016_P3/", } @inproceedings{ilcik-2016-cmssg, title = "Collaborative Modeling with Symbolic Shape Grammars", author = "Martin Il\v{c}\'{i}k and Michael Wimmer", year = "2016", abstract = "Generative design based on symbolic grammars is oriented towards individual artists. Teamwork is not supported, since single scripts produced by various artists have to be linked and maintained manually with a lot of effort. The main motivation for a collaborative modeling framework was to reduce the script management required for large projects. We achieved even more by extending the design paradigm to a cloud environment where everyone is part of a huge virtual team. The main contribution of the presented work is a web-based modeling system with a specialized variant of a symbolic shape grammar.", location = "Oulu, Finland", booktitle = "Proceedings of eCAADe 2016", pages = "417--426", keywords = "collaboration, procedural modeling", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/ilcik-2016-cmssg/", } @bachelorsthesis{kendlbacher-2016, title = "Introduction Of OpenStreetMap For The Automatic Generation Of Destination Maps", author = "Felix Kendlbacher", year = "2016", abstract = "A destination map allows all travelers within the given region of interest to reach the same destination, no matter where exactly they start their journey. 
For this purpose, the important roads for traversing the road network are chosen, while the unimportant roads are removed for clarity. These selected roads are then simplified to reduce unnecessary complexity, while maintaining the structure of the road network. The chosen data is then tweaked to increase the visibility of the small roads. During this process the layout is iteratively changed and evaluated according to certain aspects, and if a newly proposed layout performs better than the old one, that new one forms the basis for all future changes. In this work a method for automatically creating destination maps is implemented based on the algorithm proposed in the paper by Kopf et al. [KAB+10], with efforts made to improve the original work.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Destination Maps, OpenStreetMap", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/kendlbacher-2016/", } @mastersthesis{leimer-2016-coan, title = "Co-Analysis and Parameterization of 3D Shape Collections for Shape Synthesis", author = "Kurt Leimer", year = "2016", abstract = "With online model repositories growing larger every day, both experienced and inexperienced modelers are presented with new possibilities for content creation. One such possibility is the creation of new shapes by combining parts of already existing shapes. The advantages of this shape synthesis method are that it takes less time than traditional modeling approaches and that it can be used even by inexperienced users. This thesis introduces a framework for this type of shape synthesis that consists of four stages, incorporating a new way for parameterization and exploration of shape collections. Using a modular and extensible approach, the co-analysis stage groups parts of shapes into categories based on their function, creating a correspondence between parts of different shapes. By analyzing relations between pairs of parts and how their spatial arrangements vary across the collection, a small number of parameters is found in the parameterization stage. Starting with an initial shape, these parameters can then be used to browse the collection in the exploration stage, either by altering the parameters directly or by interacting with the shape itself. Finally, in the synthesis stage a new shape can be created by exchanging parts of the initial shape with corresponding parts of the shapes found during the exploration.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "computer graphics, 3D shape segmentation, 3D shape co-analysis, 3D shape processing", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/leimer-2016-coan/", } @article{raidou_eurovis16, title = "Visual Analysis of Tumor Control Models for Prediction of Radiotherapy Response.", author = "Renata Raidou and Oscar Casares-Magaz and Ludvig Paul Muren and Uulke A van der Heide and Jarle Roervik and Marcel Breeuwer and Anna Vilanova i Bartroli", year = "2016", abstract = "In radiotherapy, tumors are irradiated with a high dose, while surrounding healthy tissues are spared. To quantify the probability that a tumor is effectively treated with a given dose, statistical models were built and employed in clinical research. These are called tumor control probability (TCP) models. Recently, TCP models started incorporating additional information from imaging modalities. In this way, patient-specific properties of tumor tissues are included, improving the radiobiological accuracy of models. Yet, the employed imaging modalities are subject to uncertainties with significant impact on the modeling outcome, while the models are sensitive to a number of parameter assumptions. Currently, uncertainty and parameter sensitivity are not incorporated in the analysis, due to time and resource constraints. To this end, we propose a visual tool that enables clinical researchers working on TCP modeling to explore the information provided by their models, to discover new knowledge and to confirm or generate hypotheses within their data. Our approach incorporates the following four main components: (1) It supports the exploration of uncertainty and its effect on TCP models; (2) It facilitates parameter sensitivity analysis to common assumptions; (3) It enables the identification of inter-patient response variability; (4) It allows starting the analysis from the desired treatment outcome, to identify treatment strategies that achieve it. We conducted an evaluation with nine clinical researchers. All participants agreed that the proposed visual tool provides better understanding and new opportunities for the exploration and analysis of TCP modeling.", journal = "EuroVis - Eurographics/IEEE-VGTC Symposium on Visualization 2016", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/raidou_eurovis16/", } @bachelorsthesis{glinzner-2016-tex, title = "Texturing of 3D Objects using Simple Physics and Equilateral Triangle Patches", author = "Matthias Glinzner", year = "2016", abstract = "Visualizing cells, in particular cell membranes, is the inspiration for this work. The goal of the presented methods is the efficient visualization of phospholipid membranes. The concept of seamlessly texturing a surface in three-dimensional space plays a prominent role here. By using suitable texture patches, memory consumption can be kept low. The developed algorithm first creates a texture mesh that stays faithful to the surface structure of a user-provided input mesh. This texture mesh consists of equilateral triangles. The triangulation is achieved by first simulating repulsion between the vertices making up the texture mesh. This way they are moved around on the surface of the input mesh until they are uniformly distributed. Mapping texture onto equilateral triangles becomes trivial if triangular texture patches are assumed as well. Thus, seamless texturing is achieved. The implementation is described in detail, followed by a demonstration of results. Also, an exemplary performance analysis is given, highlighting benefits and shortcomings of the algorithm, especially concerning runtime. Additionally, a short overview of related and prior work is given. The framework used is Unity 3D.", note = "1", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/glinzner-2016-tex/", } @bachelorsthesis{donabauer-2015-asc, title = "Advanced Screen Capturing", author = "Johanna Donabauer", year = "2016", abstract = "Creating a free-form screen shot to get what is needed is not a trivial task. Moving the mouse cursor around the desired region to indicate the boundary of the screen snippet often yields jagged outlines in the result image. 
Furthermore, it is not that easy to fully exclude unnecessary screen elements during the selection process. As a consequence, the created screen shot may also contain small portions of surrounding image elements that are unwanted in the result image, or some areas may be accidentally truncated. To overcome these limitations, Advanced Screen Capturing combines screen capture functionalities with image processing methods as an alternative way for creating free-form screen shots. The selection of the desired screen region is done by roughly selecting the needed image area. After extracting the highlighted screen region, a stroke mask is calculated for detecting the image elements surrounded by the drawn stroke. This stroke mask forms the basis for rejecting or accepting partly crossed image regions or truncating large ones. The image regions for acceptance and rejection are detected as contours. By combining the stroke mask and the detected contours, the segmentation mask is generated, where image regions are either fully included, fully rejected, or cut, depending on their amount of overlap with the stroke mask. Based on the segmentation mask, the original captured screen shot is segmented. In the end, the calculated result image contains all user-defined relevant image information and adapted image boundaries. The introduced screen capture and contour-based segmentation algorithm works best for screen elements like text, charts, different kinds of shapes and a combination of these. Advanced Screen Capturing can be used as a library for integration purposes in other systems or as a stand-alone desktop application. It was implemented using C++, the Qt framework, and the OpenCV library.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/donabauer-2015-asc/", } @bachelorsthesis{Mayrhauser-2016-Cnc, title = "Migration of Surface Curve to Most Concave Isoline", author = "Maximilian Mayrhauser", year = "2016", abstract = "In this paper, I present a solution for migrating a curve on a three-dimensional surface to the most concave isoline in its vicinity. Essentially, this problem statement tackles mesh segmentation from a different angle. The search for a suitable segmentation boundary is reduced to a shortest path problem. First, a graph is built using the mesh’s vertices and edges near the input curve. Then, the shortest path is found using the Dijkstra algorithm, where a modified weighting scheme that, among other factors, makes passing through concave edges cheaper results in a path suitable as a segmentation boundary. The final algorithm provides segmentation boundaries of a quality similar to existing segmentation algorithms. The runtime generally lies below one second, thus making it viable for on-the-go optimization of the user’s input.", month = dec, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Mayrhauser-2016-Cnc/", } @bachelorsthesis{dworschak-2016-szcm, title = "Semantically Zoomable Choropleth Map", author = "Lucas Dworschak", year = "2016", abstract = "Geographic visualizations, like choropleth maps, are used to visualize data on geographic regions. 
In this thesis, a choropleth map was implemented to display the quantities of publications of scientific texts and papers. With the use of a choropleth map, the viewer is able to interpret how quantitative data changes across different geographic regions. The main feature that distinguishes the implemented choropleth map from conventional ones is the use of map navigation. The choropleth map can be zoomed and panned to different map regions. What makes this map navigation special is the use of semantic zooming, which allows the level of detail of the map to change at discrete zoom steps. The change of the level of detail means that administrative regions are divided into smaller administrative regions, which are then again colorized individually to create a new, more detailed choropleth map. Additional interactions with the choropleth map are also introduced, ranging from the manipulation of the map appearance to filtering the displayed data set.", month = sep, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/dworschak-2016-szcm/", } @article{musialski_2016_sosp, title = "Non-Linear Shape Optimization Using Local Subspace Projections", author = "Przemyslaw Musialski and Christian Hafner and Florian Rist and Michael Birsak and Michael Wimmer and Leif Kobbelt", year = "2016", abstract = "In this paper we present a novel method for non-linear shape optimization of 3D objects given by their surface representation. Our method takes advantage of the fact that various shape properties of interest give rise to underdetermined design spaces, implying the existence of many good solutions. Our algorithm exploits this by performing iterative projections of the problem to local subspaces, where it can be solved much more efficiently using standard numerical routines. We demonstrate how this approach can be utilized for various shape optimization tasks using different shape parameterizations. In particular, we show how to efficiently optimize natural frequencies, mass properties, as well as the structural yield strength of a solid body. Our method is flexible, easy to implement, and very fast.", journal = "ACM Transactions on Graphics", volume = "35", number = "4", issn = "0730-0301", doi = "10.1145/2897824.2925886", pages = "87:1--87:13", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/musialski_2016_sosp/", } @bachelorsthesis{moerth_eric-2016-3DF, title = "3D-Printing of Fetal Ultrasound", author = "Eric M\"{o}rth", year = "2016", abstract = "3D ultrasound in prenatal diagnostics is nowadays a standard investigation in the field of medical informatics. The acquired data can be used in many different applications. One of them is fabricating a model of the fetus using a 3D printer. The problem here is to convert the given volume data into a structure that can be printed. The current generation of 3D printers expects objects defined by closed surfaces as input. This work addresses the problem of how to compute such surfaces. Our solution relies on the marching cubes algorithm, which extracts the surface from the volume data. The extracted surface is then refined. The last processing step is to save the data in a suitable data format.
The results demonstrate that it is possible to print the fetus model from the 3D ultrasound data and that people are able to perceive the face of the fetus in the fabricated objects.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/moerth_eric-2016-3DF/", } @book{Chen-Information-2016, title = "Information Theory Tools for Visualization", author = "Min Chen and Miquel Feixas and Ivan Viola and Anton Bardera and Mateu Sbert and Han Wei Shen", year = "2016", isbn = "9781498740937", pages = "194", publisher = "CRC Press", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Chen-Information-2016/", } @article{ohrhallinger-2016-sgp, title = "Curve Reconstruction with Many Fewer Samples", author = "Stefan Ohrhallinger and Scott A. Mitchell and Michael Wimmer", year = "2016", abstract = "We consider the problem of sampling points from a collection of smooth curves in the plane, such that the Crust family of proximity-based reconstruction algorithms can rebuild the curves. Reconstruction requires a dense sampling of local features, i.e., parts of the curve that are close in Euclidean distance but far apart geodesically. We show that epsilon<0.47-sampling is sufficient for our proposed HNN-CRUST variant, improving upon the state-of-the-art requirement of epsilon<1/3-sampling. Thus we may reconstruct curves with many fewer samples. We also present a new sampling scheme that reduces the required density even further than epsilon<0.47-sampling. We achieve this by better controlling the spacing between geodesically consecutive points. Our novel sampling condition is based on the reach, the minimum local feature size along intervals between samples. This is mathematically closer to the reconstruction density requirements, particularly near sharp-angled features. We prove lower and upper bounds on reach rho-sampling density in terms of lfs epsilon-sampling and demonstrate that we typically reduce the required number of samples for reconstruction by more than half.", journal = "Computer Graphics Forum", volume = "35", number = "5", issn = "1467-8659", pages = "167--176", keywords = "sampling condition, curve reconstruction, curve sampling", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/ohrhallinger-2016-sgp/", } @bachelorsthesis{Reisacher_Matthias_CPW, title = "CellPathway a Simulation Tool for Illustrative Visualization of Biochemical Networks", author = "Matthias Reisacher", year = "2016", abstract = "The molecular knowledge about complex biochemical reaction networks in biotechnology is crucial and has received a lot of attention lately. As a consequence, multiple visualization programs have already been developed to illustrate the anatomy of a cell. However, since a real cell performs millions of reactions every second to sustain life, it is necessary to move from anatomical to physiological illustrations to communicate knowledge about the behavior of a cell more accurately. In this thesis, I propose a reaction system including a collision detection algorithm, which is able to work at the level of single atoms, to enable precise simulation of molecular interactions. To visually explain molecular activities during the simulation process, a real-time glow effect in combination with a clipping object has been implemented.
Since intracellular processes are performed through a set of chemical transformations, a hierarchical structure is used to illustrate the impact of one reaction on the entire simulation. The CellPathway system integrates acceleration techniques to render large datasets containing millions of atoms in real time, while the reaction system is processed directly on the GPU to enable simulations with more than 1000 molecules. Furthermore, a graphical user interface has been implemented to allow the user to interactively control parameters during the simulation.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Reisacher_Matthias_CPW/", } @inproceedings{Reichinger_Fuhrmann_2016, title = "A Concept for Re-Useable Interactive Tactile Reliefs", author = "Andreas Reichinger and Anton Fuhrmann and Stefan Maierhofer and Werner Purgathofer", year = "2016", abstract = "We introduce a concept for a relief printer, a novel production method for tactile reliefs that allows the reproduction of bas-reliefs with several centimeters of height difference. In contrast to available methods, this printer will have a much shorter preparation time and will neither consume material nor produce waste, since it is based on a re-usable medium suitable for temporary printouts. Second, we sketch a concept for the autonomous, interactive exploration of tactile reliefs in the form of a gesture-controlled audio guide based on recent depth cameras. In particular, the combination of both approaches promises rapid tactile accessibility to 2.5D spatial information in a home or education setting, to on-line resources, or as a kiosk installation in museums.", booktitle = "A Concept for Re-Useable Interactive Tactile Reliefs", journal = "ICCHP 2016, Part II", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Reichinger_Fuhrmann_2016/", } @article{lemuzic-mindek-2016-viseq, title = "Visibility Equalizer: Cutaway Visualization of Mesoscopic Biological Models", author = "Mathieu Le Muzic and Peter Mindek and Johannes Sorger and Ludovic Autin and David Goodsell and Ivan Viola", year = "2016", abstract = "In scientific illustrations and visualization, cutaway views are often employed as an effective technique for occlusion management in densely packed scenes. We propose a novel method for authoring cutaway illustrations of mesoscopic biological models. In contrast to existing cutaway algorithms, we take advantage of the specific nature of the biological models. These models consist of thousands of instances with a comparably small number of different types. Our method constitutes a two-stage process. In the first step, clipping objects are placed in the scene, creating a cutaway visualization of the model. During this process, a hierarchical list of stacked bars informs the user about the instance visibility distribution of each individual molecular type in the scene. In the second step, the visibility of each molecular type is fine-tuned through these bars, which at this point act as interactive visibility equalizers.
An evaluation of our technique with domain experts confirmed that our equalizer-based approach to visibility specification is valuable and effective for both scientific and educational purposes.", journal = "Computer Graphics Forum", volume = "35", number = "3", keywords = "molecular visualization, visibility, occlusion", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/lemuzic-mindek-2016-viseq/", } @inproceedings{Waldin_Nicholas_2016_Chameleon, title = "Chameleon Dynamic Color Mapping for Multi-Scale Structural Biology Models", author = "Nicholas Waldin and Mathieu Le Muzic and Manuela Waldner and Eduard Gr\"{o}ller and David Goodsell and Ludovic Autin and Ivan Viola", year = "2016", abstract = "Visualization of structural biology data uses color to categorize or separate dense structures into particular semantic units. In multiscale models of viruses or bacteria, there are atoms on the finest level of detail, then amino acids, secondary structures, macromolecules, up to the compartment level, and on all these levels elements can be visually distinguished by color. However, currently only single-scale coloring schemes are utilized, which show information for one particular scale only. We present a novel technology which adaptively, based on the current scale level, adjusts the color scheme to depict or distinguish the currently best visible structural information. We treat color as a visual resource that is distributed given a particular demand. Changes of the color scheme are seamlessly interpolated from the color scheme of the previous view into the new one. With such dynamic multi-scale color mapping we ensure that the viewer is able to distinguish structural detail shown on any given scale. This technique has been tested by users with expertise in structural biology and has overall been well received.", event = "VCBM", booktitle = "Eurographics Workshop on Visual Computing for Biology and Medicine", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Waldin_Nicholas_2016_Chameleon/", } @article{Waldin_Nicholas_2016_Colormaps, title = "Personalized 2D color maps", author = "Nicholas Waldin and Matthias Bernhard and Ivan Viola", year = "2016", abstract = "2D color maps are often used to visually encode complex data characteristics such as heat or height. The comprehension of color maps in visualization is affected by the display (e.g., a monitor) and the perceptual abilities of the viewer. In this paper we present a novel method to measure a user's ability to distinguish colors of a two-dimensional color map on a given monitor. We show how to adapt the color map to the user and display to optimally compensate for the measured deficiencies. Furthermore, we improve user acceptance of the calibration procedure by transforming the calibration into a game. The user has to sort colors along a line in a 3D color space in a competitive fashion.
The errors the user makes in sorting these lines are used to adapt the color map to his perceptual capabilities.", issn = "0097-8493", journal = "Computers & Graphics", volume = "59", pages = "143--150", keywords = "Color, Perception, Color vision deficiency", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Waldin_Nicholas_2016_Colormaps/", } @inproceedings{Waldin_Nicholas_2016_Individualization, title = "Individualization of 2D Color Maps for People with Color Vision Deficiencies", author = "Nicholas Waldin and Matthias Bernhard and Peter Rautek and Ivan Viola", year = "2016", location = "Smolenice, Slovakia", booktitle = "Proceedings of the 32nd Spring Conference on Computer Graphics", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Waldin_Nicholas_2016_Individualization/", }