@article{Red_Andreas_2015_FFT, title = "Fuzzy feature tracking", author = "Andreas Reh and Aleksandr Amirkhanov and Johann Kastner and Eduard Gr\"{o}ller and Christoph Heinzl", year = "2015", abstract = "In situ analysis is becoming increasingly important in the evaluation of existing as well as novel materials and components. In this domain, specialists require answers on questions such as: How does a process change internal and external structures of a component? or How do the internal features evolve?In this work, we present a novel integrated visual analysis tool to evaluate series of X-ray Computed Tomography (XCT) data. We therefore process volume datasets of a series of XCT scans, which non-destructively cover the evolution of a process by in situ scans. After the extraction of individual features, a feature tracking algorithm is applied to detect changes of features throughout the series as events. We distinguish between creation, continuation, split, merge and dissipation events. As an explicit tracking is not always possible, we introduce the computation of a Tracking Uncertainty. We visualize the data together with the determined events in multiple linked-views, each emphasizing individual aspects of the 4D-XCT dataset series: A Volume Player and a 3D Data View show the spatial feature information, whereas the global overview of the feature evolution is visualized in the Event Explorer. The Event Explorer allows for interactive exploration and selection of the events of interest. The selection is further used as basis to calculate a Fuzzy Tracking Graph visualizing the global evolution of the features over the whole series.We finally demonstrate the results and advantages of the proposed tool using various real world applications, such as a wood shrinkage analysis and an AlSiC alloy under thermal load. 
Graphical abstractDisplay Omitted HighlightsWe calculate a Tracking Uncertainty in order to find correlated features.The Event Explorer shows a global overview of events and feature properties.The Fuzzy Tracking Graph is used to track features through all time-steps.The Volume Player shows control elements to traverse the steps of a dataset series.", month = dec, journal = "Computers and Graphics", number = "PB", volume = "53", pages = "177--184", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Red_Andreas_2015_FFT/", } @misc{Diehl_2015, title = "Albero: A Visual Analytics Tool for Probabilistic Weather Forecasting.", author = "Alexandra Diehl and Leandro Pelorosso and Kresimir Matkovic and Claudio Delrieux and Marc Ruiz and Eduard Gr\"{o}ller and Stefan Bruckner", year = "2015", month = nov, location = "University of Buenos Aires", event = "Poster at Workshop Big Data \& Environment", conference-date = "Poster presented at Poster at Workshop Big Data \& Environment (2015-11)", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Diehl_2015/", } @bachelorsthesis{krakhofer-2015, title = "Automatic Summarization of Image Sets", author = "Michael Krakhofer", year = "2015", abstract = "The aim of this bachelor thesis was to find different approaches to solve the following problem: A set of images is divided into different groups in a way that every image belongs to exactly one group. The user defines which image belongs to which group. The images should be positioned into a grid of a user-defined size, so that they can be recognised as clusters easily. To do so, it must be guaranteed, that every image has at least one neighbour image regarding the Manhattan distance which belongs to the same group. Also there must not be any gaps, or clusters which split into several parts. Multiple algorithms which are able to solve this task are to be found and analysed. 
There are many approaches whose goal is to create an image summarisation out of a set of images. But these approaches place the images in a way that the groups are shaped by the algorithm itself. Yet there is no method published, which lets the user define the involved groups and their associated set of images. This thesis meets this requirements and offers different solutions for this task.", month = nov, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/krakhofer-2015/", } @mastersthesis{Klein_Tobias_2015TIV, title = "Towards Interactive Visual Exploration of Parallel Programs using a Domain-specific Language", author = "Tobias Klein", year = "2015", abstract = "The utilization of GPUs and the massively parallel computing paradigm have become increasingly prominent in many research domains. Recent developments of platforms, such as OpenCL and CUDA, enable the usage of heterogeneous parallel computing in a wide-spread field. However, the efficient utilization of parallel hardware requires profound knowledge of parallel programming and the hardware itself. Our approach presents a domain-specific language that facilitates fast prototyping of parallel programs, and a visual explorer which reveals their execution behavior. With the aid of our visualizations, interactions with the hardware become visible, supporting the comprehensibility of the program and its utilization of the hardware components. Furthermore, we aggregate behavior that leads to common issues in parallel programming and present it in a clearly structured view to the user. We augment the standard methods for debugging and profiling by a visual approach that enables a more problem-specific, fine-grained way of analyzing parallel code. 
Our framework parses all program code and user-specified annotations in order to enable automatic, yet configurable code instrumentation. The resulting recordings are directly linked to interactive visualizations created with the well-known D3 (data-driven documents) framework. To demonstrate our approach, we present two case studies about the visual analysis of memory bank conflicts and branch divergence. They investigate different parallel reduction implementations and a common image processing example (all from the NVIDIA OpenCL SDK). We show that our visualizations provide immediate visual insight in the execution behavior of the program and that the performance influence of the implementations is directly reflected visually.", month = nov, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Klein_Tobias_2015TIV/", } @misc{Ganuza_2015, title = "Interactive Semi-Automatic Categorization for Spinel Group Minerals", author = "Mar\'{i}a Luj\'{a}n Ganuza and Maria Florencia Gargiulo and Gabriela Ferracutti and Silvia Castro and Ernesto Bjerg and Eduard Gr\"{o}ller and Kresimir Matkovic", year = "2015", month = oct, event = "IEEE VIS 2015", editor = "IEEE", conference-date = "Poster presented at IEEE VIS 2015 (2015-10)", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Ganuza_2015/", } @misc{Ganuza_ML_2015_ISA, title = "Interactive Semi-Automatic Categorization for Spinel Group Minerals", author = "Mar\'{i}a Luj\'{a}n Ganuza and Maria Florencia Gargiulo and Gabriela Ferracutti and Silvia Castro and Ernesto Bjerg and Eduard Gr\"{o}ller and Kresimir Matkovic", year = "2015", abstract = "Spinel group minerals are excellent indicators of geological environments (tectonic settings). 
In 2001, Barnes and Roeder defined a set of contours corresponding to compositional fields for spinel group minerals. Geologists typically use these contours to estimate the tectonic environment where a particular spinel composition could have been formed. This task is prone to errors and requires tedious manual comparison of overlapping diagrams. We introduce a semi-automatic, interactive detection of tectonic settings for an arbitrary dataset based on the Barnes and Roeder contours. The new approach integrates the mentioned contours and includes a novel interaction called contour brush. The new methodology is integrated in the Spinel Explorer system and it improves the scientist's workflow significantly.", month = oct, location = "Chicago, IL, USA", isbn = "978-1-4673-9783-4", event = "2015 IEEE Conference on Visual Analytics Science and Technology (VAST) (2015)", conference-date = "Poster presented at 2015 IEEE Conference on Visual Analytics Science and Technology (VAST) (2015) (2015-10-25--2015-10-30)", note = "197--198", pages = "197--198", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Ganuza_ML_2015_ISA/", } @inproceedings{sorger-2015-taxintec, title = "A Taxonomy of Integration Techniques for Spatial and Non-Spatial Visualizations", author = "Johannes Sorger and Thomas Ortner and Harald Piringer and Gerd Hesina and Eduard Gr\"{o}ller", year = "2015", abstract = "Research on visual data representations is traditionally classified into methods assuming an inherent mapping from data values to spatial coordinates (scientific visualization and real-time rendering) and methods for abstract data lacking explicit spatial references (information visualization). In practice, however, many applications need to analyze data comprising abstract and spatial information, thereby spanning both visualization domains. Traditional classification schemes do not support a formal description of these integrated systems. 
The contribution of this paper is a taxonomy that describes a holistic design space for integrating components of spatial and abstract visualizations. We structure a visualization into three components: Data, Visual, and Navigation. These components can be linked to build integrated visualizations. Our taxonomy provides an alternative view on the field of visualization in a time where the border between scientific and information visualization becomes blurred.", month = oct, series = "Springer Lecture Notes in Computer Science (LNCS) series", publisher = "The Eurographics Association", location = "Aachen, Germany", issn = "0302-9743", editor = "David Bommes and Tobias Ritschel and Thomas Schultz", booktitle = "20th International Symposium on Vision, Modeling and Visualization (VMV 2015)", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/sorger-2015-taxintec/", } @bachelorsthesis{Pfahler_David_2015_IPI, title = "In-Place Interaction in Dashboards", author = "David Pfahler", year = "2015", abstract = "Interaction techniques for information visualization systems require control elements to interact with the data. These elements consume visual space and this is one of the major limiting factors of a visual analytics dashboard. This work introduces interaction methods to control the visualizations of a dashboard without the need of additional visual space. These in-place control elements give the dashboard designer the possibility to design more compact dashboards, while maintaining the same functionality without using space filling controls. 
This is achieved by hovering areas in the visualizations which opens controls or triggers events.", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Pfahler_David_2015_IPI/", } @article{Labschuetz_Matthias_2015_JIT, title = "JiTTree: A Just-in-Time Compiled Sparse GPU Volume Data Structure", author = "Matthias Labsch\"{u}tz and Stefan Bruckner and Eduard Gr\"{o}ller and Markus Hadwiger and Peter Rautek", year = "2015", abstract = "Sparse volume data structures enable the efficient representation of large but sparse volumes in GPU memory for computation and visualization. However, the choice of a specific data structure for a given data set depends on several factors, such as the memory budget, the sparsity of the data, and data access patterns. In general, there is no single optimal sparse data structure, but a set of several candidates with individual strengths and drawbacks. One solution to this problem are hybrid data structures which locally adapt themselves to the sparsity. However, they typically suffer from increased traversal overhead which limits their utility in many applications. This paper presents JiTTree, a novel sparse hybrid volume data structure that uses just-in-time compilation to overcome these problems. By combining multiple sparse data structures and reducing traversal overhead we leverage their individual advantages. We demonstrate that hybrid data structures adapt well to a large range of data sets. They are especially superior to other sparse data structures for data sets that locally vary in sparsity. Possible optimization criteria are memory, performance and a combination thereof. Through just-in-time (JIT) compilation, JiTTree reduces the traversal overhead of the resulting optimal data structure. 
As a result, our hybrid volume data structure enables efficient computations on the GPU, while being superior in terms of memory usage when compared to non-hybrid data structures.", month = oct, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "22", number = "1", note = "Published in January 2016", issn = "1077-2626", pages = "1025--1034", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Labschuetz_Matthias_2015_JIT/", } @inproceedings{cellVIEW_2015, title = "cellVIEW: a Tool for Illustrative and Multi-Scale Rendering of Large Biomolecular Datasets", author = "Mathieu Le Muzic and Ludovic Autin and Julius Parulek and Ivan Viola", year = "2015", abstract = "In this article we introduce cellVIEW, a new system to interactively visualize large biomolecular datasets on the atomic level. Our tool is unique and has been specifically designed to match the ambitions of our domain experts to model and interactively visualize structures comprised of several billions atom. The cellVIEW system integrates acceleration techniques to allow for real-time graphics performance of 60 Hz display rate on datasets representing large viruses and bacterial organisms. Inspired by the work of scientific illustrators, we propose a level-of-detail scheme which purpose is two-fold: accelerating the rendering and reducing visual clutter. The main part of our datasets is made out of macromolecules, but it also comprises nucleic acids strands which are stored as sets of control points. For that specific case, we extend our rendering method to support the dynamic generation of DNA strands directly on the GPU. It is noteworthy that our tool has been directly implemented inside a game engine. We chose to rely on a third party engine to reduce software development work-load and to make bleeding-edge graphics techniques more accessible to the end-users. 
To our knowledge cellVIEW is the only suitable solution for interactive visualization of large bimolecular landscapes on the atomic level and is freely available to use and extend.", month = sep, isbn = "978-3-905674-82-8", publisher = "The Eurographics Association", organization = "EG Digital Library", location = "Chester, United Kingdom", issn = "2070-5786", editor = "Katja B\"{u}hler and Lars Linsen and Nigel W. John", booktitle = "Eurographics Workshop on Visual Computing for Biology and Medicine", pages = "61--70", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/cellVIEW_2015/", } @inproceedings{Miao_2015_VCBM, title = "CoWRadar: Visual Quantification of the Circle of Willis in Stroke Patients", author = "Haichao Miao and Gabriel Mistelbauer and Christian Nasel and Eduard Gr\"{o}ller", year = "2015", abstract = "This paper presents a method for the visual quantification of cerebral arteries, known as the Circle of Willis (CoW). The CoW is an arterial structure that is responsible for the brain’s blood supply. Dysfunctions of this arterial circle can lead to strokes. The diagnosis relies on the radiologist’s expertise and the software tools used. These tools consist of very basic display methods of the volumetric data without support of advanced technologies in medical image processing and visualization. The goal of this paper is to create an automated method for the standardized description of cerebral arteries in stroke patients in order to provide an overview of the CoW’s configuration. This novel display provides visual indications of problematic areas as well as straightforward comparisons between multiple patients. Additionally, we offer a pipeline for extracting the CoW from Time-of-Flight Magnetic Resonance Angiography (TOF-MRA) data sets. An enumeration technique for the labeling of the arterial segments is therefore suggested. 
We also propose a method for detecting the CoW’s main supplying arteries by analyzing the coronal, sagittal and transverse image planes of the data sets. We evaluated the feasibility of our visual quantification approach in a study of 63 TOF-MRA data sets and compared our findings to those of three radiologists. The obtained results demonstrate that our proposed techniques are effective in detecting the arteries of the CoW.", month = sep, isbn = "978-3-905674-82-8", publisher = "The Eurographics Association", organization = "EG Digital Library", location = "Chester, United Kingdom", issn = "2070-5786", editor = "Katja B\"{u}hler and Lars Linsen and Nigel W. John", booktitle = "EG Workshop on Visual Computing for Biology and Medicine", pages = "1--10", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Miao_2015_VCBM/", } @inproceedings{Purg2015-China, title = "Industrial Use of Mixed Reality in VRVis Projects", author = "Werner Purgathofer and Clemens Arth and Dieter Schmalstieg", year = "2015", month = sep, organization = "VR Kebao (Tianjin) Science \& Technology Co., Ltd", location = "Peking, China", booktitle = "Proceedings of VISIC", pages = "19--23", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Purg2015-China/", } @inproceedings{SCHUETZ-2015-HQP, title = "High-Quality Point Based Rendering Using Fast Single Pass Interpolation", author = "Markus Sch\"{u}tz and Michael Wimmer", year = "2015", abstract = "We present a method to improve the visual quality of point cloud renderings through a nearest-neighbor-like interpolation of points. This allows applications to render points at larger sizes in order to reduce holes, without reducing the readability of fine details due to occluding points. 
The implementation requires only few modifications to existing shaders, making it eligible to be integrated in software applications without major design changes.", month = sep, location = "Granada, Spain", booktitle = "Proceedings of Digital Heritage 2015 Short Papers", pages = "369--372", keywords = "point-based rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/SCHUETZ-2015-HQP/", } @mastersthesis{hafner-2015-eigf, title = "Optimization of Natural Frequencies for Fabrication-Aware Shape Modeling", author = "Christian Hafner", year = "2015", abstract = "Given a target shape and a target frequency, we automatically synthesize shapes that exhibit this frequency as part of their natural spectrum while resembling the target shape as closely as possible. We propose three shape parametrization methods that afford meaningful degrees of freedom in the design of instruments such as marimbas and bells. The design space is based on the representation of a solid as the volume enclosed by an outer surface and an inner offset surface. In order to evaluate the natural frequency spectrum of a solid, we employ finite element modal analysis and evaluate the suitability of different element types. We propose a fabrication method for the production of optimized instruments by an amateur craftsperson using sand or rubber molds. The efficiency of our method is demonstrated by the production of a simple tin bell and a more complicated bell in the shape of a rabbit. We achieve agreement with the predicted pitch frequencies of 2.8% and 6% respectively. 
These physical results are supplemented by a number of computational results that explore the optimization of harmonic ratios and the influence of mesh resolution and mesh smoothness on the accuracy of the finite element model.", month = sep, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "physically based modeling, computer graphics, digital fabrication, 3D shape optmization, 3D shape processing", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/hafner-2015-eigf/", } @bachelorsthesis{Kollmann-2015-DoF, title = "Depth of Field: Point Splatting on Per-Pixel Layers", author = "Christoph Kollmann", year = "2015", abstract = "This bachelor’s thesis is about a depth of field rendering algorithm using point splatting on per-pixel layers. The intuitive procedure just uses specific adjustable parameters for the lens of a camera to generate the desired effect in real-time. Using nothing but the depth information of a scene, the depth of field can be computed. The so called circles of confusion are the representing value of the blur rate for objects that are out of focus. Depending on the distance between the focal plane and the current pixel, an expansion takes place because of the redirecting nature of a lens. The following required information is retrieved using an intensity distribution function. To avoid intensity leakage and provide a full opaque picture, the entire effect is done discrete on the screen space. This prevents subsampling and enables almost correct alpha value normalization. As stated before, an intensity distribution function is required, but in a discrete form. This is an addition to the paper, where that function is preprocessed in the application and provided for the shader directly. 
A high resolution texture of the desired intensity distribution function is provided as input to a shader program that renders multiple discrete versions. Another further addition prepares the texture by normalization, consuming more memory, but saving lots of time during computation. The last part of this paper includes a defocused highlights implementation, where a tone-mapping is applied to mimic them. In general, the luminance of a pixel is computed by a simple formula, summing up the three colors with adjustment factors. Depending on this value and some parameters, the color is adjusted where bright regions are enhanced to construct this effect. This thesis implements and extends this work and is used to test its performance after seven years when it was published. Using different configurations for the effect, the provided stable framerates of the results are used as metric for benchmarking. ", month = sep, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "depth-of-field, deferred rendering, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Kollmann-2015-DoF/", } @mastersthesis{May_Michael_2015_DIS, title = "Design and Implementation of a Shader Infrastructure and Abstraction Layer", author = "Michael May", year = "2015", abstract = "Programming the GPU is more important than ever, but the organization and development of shader code for the GPU is a difficult task. Can this process be embedded into the high level language C#, gain from the features of its toolchain and enrich shader development? 
For this purpose this thesis describes the design and implementation of a framework to abstract and embed shader development into C# with an internal domain-specific language (iDSL for short) as front-end and a plug-in system in the back-end to support expandable optimizations and different shader languages as targets. The implemented framework fits shader development into the C# toolchain, supporting autocompletion, and type error checking in the editor. The system offers good modularity and encourages developing shaders in reusable parts. This diploma thesis was developed in cooperation with VRVis Research Center in Vienna, Austria.", month = sep, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/May_Michael_2015_DIS/", } @mastersthesis{Fruehstueck_Anna_2015_DOM, title = "Decoupling Object Manipulation from Rendering in a Thin Client Visualization System", author = "Anna Fr\"{u}hst\"{u}ck", year = "2015", abstract = "Often, users of visualization applications do not have access to high performance systems for the computationally demanding visualization tasks. Rendering the visualization remotely and using a thin client (e.g. a web browser) to display the result enable the users to access the visualization even on devices that do not target graphics processing. However, the flexibility to manipulate the data set interactively suffers in thin-client configurations. This makes a meaningful interaction with data sets that contain many different objects difficult. This is especially true in in-situ visualization scenarios, where direct interaction with the data can be challenging. We solve this problem by proposing an approach that employs a deferred visualization pipeline to divide the visualization computation between a server and a client. 
Our thin client is built on web technologies (HTML5, JavaScript) and is integrated with the D3 library to enable interactive data-driven visualizations. An intermediate representation of objects is introduced which describes the data that is transferred from the server to the client on request. The server side carries out the computationally expensive parts of the pipeline while the client retains extensive flexibility by performing object modification tasks without requiring a re-rendering of the data. We introduce a novel Volume Object Model as an intermediate representation for deferred visualization. This model consists of metadata and pre-rendered visualizations of each object in a data set. In order to guarantee client-side interactivity even on large data sets, the client only receives the metadata of all objects for a pre-visualization step. By allowing the user to perform filtering using the metadata alone, the complexity of the requested visualization data can be reduced from the client side before streaming any image data. Only when the user is satisfied, the object images are requested from the server. In combination with the metadata, the final visualization can then be reconstructed from the individual images. Moreover, all objects in the visualization can be investigated and changed programmatically by the user via an integrated console. 
In summary, our system allows for fully interactive object-related visualization tasks in a web browser without triggering an expensive re-rendering on the server.", month = sep, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Fruehstueck_Anna_2015_DOM/", } @mastersthesis{Hirsch_Christian_2015_ABL, title = "Automatic Breast Lesion Examination of DCE-MRI Data Based on Fourier Analysis", author = "Christian Hirsch", year = "2015", abstract = "Breast cancer is the second most common cancer death among women in developed countries. In less developed countries it has a mortality rate of about 25% rendering it the most common cancer death. It has been demonstrated that an early breast cancer diagnosis significantly reduces the mortality. In addition to mammography and breast ultrasound, Dynamic Contrast-Enhanced Magnetic Resonance Imaging (DCE-MRI) is the modality with the highest sensitivity for breast cancer detection. However, systems for automatic lesion analysis are scarce. This thesis proposes a method for lesion evaluation without the necessity of tumor segmentation. The observer has to define a Region Of Interest (ROI) covering the lesion in question and the proposed system performs an automated lesion inspection by computing its Fourier transform. Using the Fourier transformed volume we compute the inertia tensor of its magnitude. Based on the gathered information, the G\"{o}ttinger score, which is a common breast cancer analysis scheme, is computed and the features are presented in newly create plots. These plots are evaluated with a survey where radiologists participated. The G\"{o}ttinger score assigns a numeric value for the following features: shape, boundary, Internal Enhancement Characteristics (IEC), Initial Signal Increase (ISI) and Post Initial Signal (PIS). 
We tested our method on 22 breast tumors (14 malignant and 8 benign ones). Subsequently, we compared our results to the classification of an experienced radiologist. The automatic boundary classification has an accuracy of 0.818, the shape 0.773 and the IEC 0.886 compared to the radiologist’s results. An evaluation of the accuracy of the benign vs. malignant classification shows that the method has an accuracy of 0.682 for all the G\"{o}ttinger score features and 0.772 using only the shape, boundary and IEC. The evaluation of the plot shows that radiologist like the visual representation of the G\"{o}ttinger score for single lesions, they, however, refuse the plots where multiple lesions are presented in one visual representation.", month = sep, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Hirsch_Christian_2015_ABL/", } @bachelorsthesis{Gusenbauer_Matthias_2015_ANM, title = "A Novel Mapping of Arbitrary Precision Integer Operations to the GPU", author = "Matthias Gusenbauer", year = "2015", abstract = "With modern processing hardware converging on the physical barrier in terms of transistor size and speed per single core, hardware manufacturers have shifted their focus to improve performance from raw clock power towards parallelization. Solutions to utilize the computation power of GPUs are published and supported by graphics card manufacturers. While there exist solutions for arbitrary precision integer arithmetics on the CPU there has been little adoption of these libraries to the GPU. This thesis presents an approach to map arbitrary precision integer operations to single threads on the GPU. This novel computation mapping technique is benchmarked and compared to a library that runs these computations on the CPU. 
Furthermore the novel parallelization technique is compared to an alternative mapping scheme proposed by Langer et al [Lan15]. It is shown that mapping computations to single threads outperforms both the CPU and the approach by Langer. This thesis also explored the feasibility of rational number operations on the GPU and shows that this is in fact practically usable by providing benchmarks.", month = sep, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Gusenbauer_Matthias_2015_ANM/", } @misc{hafner-2015-onff, title = "Optimization of Natural Frequencies for Fabrication-Aware Shape Modeling", author = "Christian Hafner and Przemyslaw Musialski and Thomas Auzinger and Michael Wimmer and Leif Kobbelt", year = "2015", abstract = "Given a target shape and a target frequency, we automatically synthesize a shape that exhibits this frequency as part of its natural spectrum, while resembling the target shape as closely as possible. We employ finite element modal analysis with thin-shell elements to accurately predict the acoustic behavior of 3d solids. Our optimization pipeline uses an input surface and automatically calculates an inner offset surface to describe a volumetric solid. The solid exhibits a sound with the desired pitch if fabricated from the targeted material. In order to validate our framework, we optimize the shape of a tin bell to exhibit a sound at 1760 Hz. We fabricate the bell by casting it from a mold and measure the frequency peaks in its natural ringing sound. The measured pitch agrees with our simulation to an accuracy of 2.5%. In contrast to previous method, we only use reference material parameters and require no manual tuning.", month = aug, publisher = "ACM", note = "Lecturer: P. 
Musialski", location = "Los Angeles, CA, USA", event = "ACM SIGGRAPH 2015", booktitle = "Proceedings of ACM SIGGRAPH 2015 Posters", conferencedate = "Poster presented at ACM SIGGRAPH 2015 (2015-08-09--2015-08-13)", keywords = "natural frequencies, modal analysis, shape optimization", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/hafner-2015-onff/", } @phdthesis{Smisek_Michal_A3D, title = "Analysis of 3D and 4D Images of Organisms in Embryogenesis", author = "Michal Sm\'{i}\v{s}ek", year = "2015", abstract = "In this work, we present a few modifications to the state-of-the-art algorithms, as well as several novel approaches, related to the detection of cells in biological image processing. We start by explanation of a PDE-based image processing evolution called FBLSCD and study its properties. We then define a fully automatic way of finding the stop time for this evolution. Afterwards, we try to see the FBLSCD as a morphological grayscale erosion, and we formulate a novel cell detection algorithm, called LSOpen, as an intersection of PDE-based and morphological image processing schools. Then, we discuss the best ways of inspecting cell detection results, i.e. cell identifiers. We try to quantitatively benchmark various cell detection methods by the relative amount of false positives, false negatives and multiply-detected centers yielded. We will observe that comparing cell detection results in a binary fashion is insufficient, therefore we are going to utilize the concept of distance function. Motivated by this need for robust cell detection result comparison, we analyze commonly-used methods for computing the distance function and afterwards we formulate a novel algorithm. This one has complexity O(n log2 n) and it yields Euclidean distance. In addition to that, we introduce a modification to this algorithm, enabling it to work also in maze-like, wall- and corner-containing, environments. 
This modification relies on the line rasterization algorithm. We perform various experiments to study and compare distance function methods. Results illustrate the viability of newly-proposed method. Further, a software for the comparing and inspecting cell detection results, SliceViewer, is specified, designed, implemented and tested. In the end, quantitative experiments are discussed, validating the above-mentioned novelties. ", month = aug, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Smisek_Michal_A3D/", } @article{guerrero-2015-lsp, title = "Learning Shape Placements by Example", author = "Paul Guerrero and Stefan Jeschke and Michael Wimmer and Peter Wonka", year = "2015", abstract = "We present a method to learn and propagate shape placements in 2D polygonal scenes from a few examples provided by a user. The placement of a shape is modeled as an oriented bounding box. Simple geometric relationships between this bounding box and nearby scene polygons define a feature set for the placement. The feature sets of all example placements are then used to learn a probabilistic model over all possible placements and scenes. With this model we can generate a new set of placements with similar geometric relationships in any given scene. We introduce extensions that enable propagation and generation of shapes in 3D scenes, as well as the application of a learned modeling session to large scenes without additional user interaction. 
These concepts allow us to generate complex scenes with thousands of objects with relatively little user interaction.", month = aug, journal = "ACM Transactions on Graphics", volume = "34", number = "4", issn = "0730-0301", doi = "10.1145/2766933", pages = "108:1--108:13", keywords = "modeling by example, complex model generation", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/guerrero-2015-lsp/", } @mastersthesis{Sippl_Sebastian_EFG, title = "A Framework for GPU-Assisted Generation and Composition of Inductive Rotation Patterns", author = "Sebastian Sippl", year = "2015", abstract = "The Inductive Rotation Method, developed by the artist Hofstetter Kurt, is a strategy for generating elaborate artistic patterns by applying translations and rotations repeatedly to a copy of a so called prototile. The method has been inspired by aperiodic tilings such as the popular Penrose tilings. The Inductive Rotation Patterns and their nonperiodic structure is interesting from both a mathematical and from an artistic point of view. In the scope of a previous thesis different algorithms for the generation of such patterns were already implemented and researched which resulted in a program called the “Irrational Image Generator”. However, this software prototype provides only few features which support Hofstetter in designing patterns, and can only produce patterns with limited size. The limited size results from a property of the patterns: The number of tiles grows exponentially with each iteration. The Inductive Rotation Framework, a software framework for the generation of Inductive Rotation Patterns, was developed in the course of this thesis and unites new generation algorithms with an extended tool-set, like a graphical prototile editor which supports Hofstetter in his pattern design process. One of the existing algorithms was successfully parallelized and now allows the artist pattern generation via GPGPU methods. 
Depending on the implementation this can increase either pattern generation speed or the maximum pattern-size. In order to research the advantages and disadvantages of a recently developed tile substitution method for the creation of Inductive Rotation Patterns, the framework was extended by an algorithm which is based on this new discovery. Following the definition of the Inductive Rotation Method from Hofstetter, this tile-substitution method produces only a subset of Inductive Rotation Patterns. By varying the definition of Hofstetter’s Inductive Rotation Method only slightly, the Sierpinski gasket, a fractal pattern, emerges. The similarity between the Inductive Rotation Method and fractals can be observed further by comparing the parallel generation algorithm’s matrix scheme to Iterated Function Systems (IFSs), which are used to generate fractals.", month = aug, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Sippl_Sebastian_EFG/", } @article{musialski-2015-souos, title = "Reduced-Order Shape Optimization Using Offset Surfaces", author = "Przemyslaw Musialski and Thomas Auzinger and Michael Birsak and Michael Wimmer and Leif Kobbelt", year = "2015", abstract = "Given the 2-manifold surface of a 3d object, we propose a novel method for the computation of an offset surface with varying thickness such that the solid volume between the surface and its offset satisfies a set of prescribed constraints and at the same time minimizes a given objective functional. Since the constraints as well as the objective functional can easily be adjusted to specific application requirements, our method provides a flexible and powerful tool for shape optimization. 
We use manifold harmonics to derive a reduced-order formulation of the optimization problem, which guarantees a smooth offset surface and speeds up the computation independently from the input mesh resolution without affecting the quality of the result. The constrained optimization problem can be solved in a numerically robust manner with commodity solvers. Furthermore, the method allows simultaneously optimizing an inner and an outer offset in order to increase the degrees of freedom. We demonstrate our method in a number of examples where we control the physical mass properties of rigid objects for the purpose of 3d printing. ", month = aug, journal = "ACM Transactions on Graphics (ACM SIGGRAPH 2015)", volume = "34", number = "4", issn = "0730-0301", doi = "10.1145/2766955", pages = "102:1--102:9", keywords = "reduced-order models, shape optimization, computational geometry, geometry processing, physical mass properties", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/musialski-2015-souos/", } @article{Mistelbauer_Gabriel_2015_NHR, title = "New hybrid reformations of peripheral CT angiography: do we still need axial images?", author = "R\"{u}diger Schernthaner and Florian Wolf and Gabriel Mistelbauer and Michael Weber and Milo\v{s} \v{S}r\'{a}mek and Eduard Gr\"{o}ller and Christian Loewe", year = "2015", abstract = "Purpose To quantify the detectability of peripheral artery stenosis on hybrid CT angiography (CTA) reformations. Methods Hybrid reformations were developed by combining multipath curved planar reformations (mpCPR) and maximum intensity projections (MIP). Fifty peripheral CTAs were evaluated twice: either with MIP, mpCPR and axial images or with hybrid reformations only. Digital subtraction angiography served as gold standard. Results Using hybrid reformations, two independent readers detected 88.0% and 81.3% of significant stenosis, respectively. However, CTA including axial images detected statistically significant more lesions (98%). 
Conclusion Peripheral CTA reading including axial images is still recommended. Further improvement of these hybrid reformations is necessary.", month = jul, journal = "Clinical Imaging", number = "4", volume = "39", pages = "603--607", keywords = "Peripheral arterial occlusive disease, CT angiography, Three-dimensional reformations, Postprocessing", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Mistelbauer_Gabriel_2015_NHR/", } @mastersthesis{fleiss-2015-da, title = "Interactive Exploration of Architecture Using Exploded Views", author = "Felix Flei{\ss}", year = "2015", abstract = "The master thesis at hand addresses automated generated explosion views as a tool of visualization and how they can be applied to illustrate architecture. I investigate existing solutions of the generation and the related theoretical foundation of explosion views, assembling process, architecture and visualization. Then I extract useful concepts from the foundation research to deduce design principles. The major part of the work is the designing and implementation of a visualization system that empowers the user to interactively explore and understand an architectural building. I hereby examine to what extent explosion views can be applied to do so. 
", month = jun, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "exploded views, architecture visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/fleiss-2015-da/", } @article{Jimenez_SSS_2015, title = "Separable Subsurface Scattering", author = "Jorge Jimenez and Karoly Zsolnai-Feh\'{e}r and Adrian Jarabo and Christian Freude and Thomas Auzinger and Xian-Chun Wu and Javier van der Pahlen and Michael Wimmer and Diego Gutierrez", year = "2015", abstract = "In this paper we propose two real-time models for simulating subsurface scattering for a large variety of translucent materials, which need under 0.5 milliseconds per frame to execute. This makes them a practical option for real-time production scenarios. Current state-of-the-art, real-time approaches simulate subsurface light transport by approximating the radially symmetric non-separable diffusion kernel with a sum of separable Gaussians, which requires multiple (up to twelve) 1D convolutions. In this work we relax the requirement of radial symmetry to approximate a 2D diffuse reflectance profile by a single separable kernel. We first show that low-rank approximations based on matrix factorization outperform previous approaches, but they still need several passes to get good results. To solve this, we present two different separable models: the first one yields a high-quality diffusion simulation, while the second one offers an attractive trade-off between physical accuracy and artistic control. Both allow rendering subsurface scattering using only two 1D convolutions, reducing both execution time and memory consumption, while delivering results comparable to techniques with higher cost. Using our importance-sampling and jittering strategies, only seven samples per pixel are required. 
Our methods can be implemented as simple post-processing steps without intrusive changes to existing rendering pipelines. https://www.youtube.com/watch?v=P0Tkr4HaIVk", month = jun, journal = "Computer Graphics Forum", volume = "34", number = "6", issn = "1467-8659", pages = "188--197", keywords = "separable, realtime rendering, subsurface scattering, filtering", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Jimenez_SSS_2015/", } @bachelorsthesis{Plank_Pascal_2015_HVP, title = "Human Visual Perception of 3D Surfaces", author = "Pascal Plank", year = "2015", abstract = "The gauge figure task is a methodological tool to study an observer’s perception of surface orientations in renderings and visualization techniques. Originally developed to probe the perception of paintings, i. e. not veridical stimuli, the gauge figure task has since been used to measure absolute perceptual errors by comparing an observer’s surface estimates with the ground truth surface normals. In this bachelor thesis the accuracy of the gauge figure task was investigated, i. e. how well the probed surface estimates align with the perceived surface normals on an absolute scale. To isolate the probing error a user study was carried out using different primitive objects and several depth cues, including depth-from-motion and stereo disparities, to minimize potential perceptual errors. It was expected that a stereoscopic presentation of the gauge figure would reduce the perceptual error of the gauge figure dramatically. During the experiments I collected about 16.300 probes from 17 participants under different viewing conditions where either the stimuli, the gauge figure, both or none of them were presented in stereo. The results show that the gauge figure estimates for primitive stimuli, e. g. a sphere or a cylinder, align well with the ground truth in modality-consistent conditions, i. e. where stimuli and gauge figure were both presented in stereo or both in mono. 
In contrast to this, a gauge figure presented in stereo to probe monoscopic stimuli resulted in an enormous slant underestimation. In addition, in the inverse case, where the gauge figure is presented in mono and the stimuli in stereo, an overestimation occurred - even for simple stimulus objects. This bachelor thesis covers the general background and previous work for this subject, the design, setup and procedure of the user study as well as the results and a qualitative assessment. Furthermore, two alternative explanations for the found results are discussed and an outlook for possible future work is given.", month = jun, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Plank_Pascal_2015_HVP/", } @article{Cornel_Daniel_2015_VOC, title = "Visualization of Object-Centered Vulnerability to Possible Flood Hazards", author = "Daniel Cornel and Artem Konev and Bernhard Sadransky and Zsolt Horvath and Eduard Gr\"{o}ller and J\"{u}rgen Waser", year = "2015", abstract = "As flood events tend to happen more frequently, there is a growing demand for understanding the vulnerability of infrastructure to flood-related hazards. Such demand exists both for flood management personnel and the general public. Modern software tools are capable of generating uncertainty-aware flood predictions. However, the information addressing individual objects is incomplete, scattered, and hard to extract. In this paper, we address vulnerability to flood-related hazards focusing on a specific building. Our approach is based on the automatic extraction of relevant information from a large collection of pre-simulated flooding events, called a scenario pool. From this pool, we generate uncertainty-aware visualizations conveying the vulnerability of the building of interest to different kinds of flooding events. 
On the one hand, we display the adverse effects of the disaster on a detailed level, ranging from damage inflicted on the building facades or cellars to the accessibility of the important infrastructure in the vicinity. On the other hand, we provide visual indications of the events to which the building of interest is vulnerable in particular. Our visual encodings are displayed in the context of urban 3D renderings to establish an intuitive relation between geospatial and abstract information. We combine all the visualizations in a lightweight interface that enables the user to study the impacts and vulnerabilities of interest and explore the scenarios of choice. We evaluate our solution with experts involved in flood management and public communication.", month = jun, journal = "Computer Graphics Forum", volume = "34", number = "3", note = "3rd Best Paper Award", issn = "1467-8659", pages = "331--340", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Cornel_Daniel_2015_VOC/", } @WorkshopTalk{Vad_Viktor_2015_RVV, title = "Reproducibility, Verification, and Validation of Experiments on the Marschner-Lobb Test Signal", author = "Viktor Vad and Bal\'{a}zs Cs\'{e}bfalvi and Peter Rautek and Eduard Gr\"{o}ller", year = "2015", abstract = "The Marschner-Lobb (ML) test signal has been used for two decades to evaluate the visual quality of different volumetric reconstruction schemes. Previously, the reproduction of these experiments was very simple, as the ML signal was used to evaluate only compact filters applied on the traditional Cartesian lattice. As the Cartesian lattice is separable, it is easy to implement these filters as separable tensor-product extensions of well-known 1D filter kernels. Recently, however, non-separable reconstruction filters have received increased attention that are much more difficult to implement than the traditional tensor-product filters. 
Even if these are piecewise polynomial filters, the space partitions of the polynomial pieces are geometrically rather complicated. Therefore, the reproduction of the ML experiments is getting more and more difficult. Recently, we reproduced a previously published ML experiment for comparing Cartesian Cubic (CC), Body-Centered Cubic (BCC), and Face-Centered Cubic (FCC) lattices in terms of prealiasing. We recognized that the previously applied settings were biased and gave an undue advantage to the FCC-sampled ML representation. This result clearly shows that reproducibility, verification, and validation of the ML experiments is of crucial importance as the ML signal is the most frequently used benchmark for demonstrating the superiority of a reconstruction scheme or volume representations on non-Cartesian lattices.", month = may, event = "EuroVis Workshop on Reproducibility, Verification, and Validation in Visualization (EuroRV3)", location = "Cagliari, Sardinia, Italy", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Vad_Viktor_2015_RVV/", } @article{Ilcik_2015_LAY, title = "Layer-Based Procedural Design of Facades", author = "Martin Il\v{c}\'{i}k and Przemyslaw Musialski and Thomas Auzinger and Michael Wimmer", year = "2015", abstract = "We present a novel procedural framework for interactively modeling building fa\c{c}ades. Common procedural approaches, such as shape grammars, assume that building fa\c{c}ades are organized in a tree structure, while in practice this is often not the case. Consequently, the complexity of their layout description becomes unmanageable for interactive editing. In contrast, we obtain a fa\c{c}ade by composing multiple overlapping layers, where each layer contains a single rectilinear grid of fa\c{c}ade elements described by two simple generator patterns. This way, the design process becomes more intuitive and the editing effort for complex layouts is significantly reduced. 
To achieve this, we present a method for the automated merging of different layers in the form of a mixed discrete and continuous optimization problem. Finally, we provide several modeling examples and a comparison to shape grammars in order to highlight the advantages of our method when designing realistic building fa\c{c}ades. You can find the paper video at https://vimeo.com/118400233 .", month = may, journal = "Computer Graphics Forum", volume = "34", number = "2", issn = "1467-8659", pages = "205--216", keywords = "procedural modeling", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Ilcik_2015_LAY/", } @article{karimov-2015-HD, title = "Guided Volume Editing based on Histogram Dissimilarity", author = "Alexey Karimov and Gabriel Mistelbauer and Thomas Auzinger and Stefan Bruckner", year = "2015", abstract = "Segmentation of volumetric data is an important part of many analysis pipelines, but frequently requires manual inspection and correction. While plenty of volume editing techniques exist, it remains cumbersome and error-prone for the user to find and select appropriate regions for editing. We propose an approach to improve volume editing by detecting potential segmentation defects while considering the underlying structure of the object of interest. Our method is based on a novel histogram dissimilarity measure between individual regions, derived from structural information extracted from the initial segmentation. Based on this information, our interactive system guides the user towards potential defects, provides integrated tools for their inspection, and automatically generates suggestions for their resolution. We demonstrate that our approach can reduce interaction effort and supports the user in a comprehensive investigation for high-quality segmentations. 
", month = may, journal = "Computer Graphics Forum", volume = "34", number = "3", pages = "91--100", keywords = "Edge and feature detection, Image Processing and Computer Vision, Computer Graphics, Display algorithms, Picture/Image Generation, Segmentation, Methodology and Techniques, Interaction techniques", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/karimov-2015-HD/", } @article{Viola_Ivan_2015_MCT, title = "MoleCollar and Tunnel Heat Map Visualizations for Conveying Spatio-Temporo-Chemical Properties Across and Along Protein Voids", author = "Jan Byska and Adam Jurcik and Eduard Gr\"{o}ller and Ivan Viola and Barbora Kozlikova", year = "2015", abstract = "Studying the characteristics of proteins and their inner void space, including their geometry, physico-chemical properties and dynamics are instrumental for evaluating the reactivity of the protein with other small molecules. The analysis of long simulations of molecular dynamics produces a large number of voids which have to be further explored and evaluated. In this paper we propose three new methods: two of them convey important properties along the long axis of a selected void during molecular dynamics and one provides a comprehensive picture across the void. The first two proposed methods use a specific heat map to present two types of information: an overview of all detected tunnels in the dynamics and their bottleneck width and stability over time, and an overview of a specific tunnel in the dynamics showing the bottleneck position and changes of the tunnel length over time. These methods help to select a small subset of tunnels, which are explored individually and in detail. For this stage we propose the third method, which shows in one static image the temporal evolvement of the shape of the most critical tunnel part, i.e., its bottleneck. This view is enriched with abstract depictions of different physicochemical properties of the amino acids surrounding the bottleneck. 
The usefulness of our newly proposed methods is demonstrated on a case study and the feedback from the domain experts is included. The biochemists confirmed that our novel methods help to convey the information about the appearance and properties of tunnels in a very intuitive and comprehensible manner.", month = may, journal = "Computer Graphics Forum", volume = "34", number = "3", note = "EuroVis 2015 - Conference Proceedings", pages = "1--10", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Viola_Ivan_2015_MCT/", } @inproceedings{Viola_Ivan_2015_VBS, title = "Visualization of Biomolecular Structures: State of the Art", author = "Barbora Kozlikova and Michael Krone and Norbert Lindow and Martin Falk and Marc Baaden and Daniel Baum and Ivan Viola and Julius Parulek and Hans-Christian Hege", year = "2015", abstract = "Structural properties of molecules are of primary concern in many fields. This report provides a comprehensive overview on techniques that have been developed in the fields of molecular graphics and visualization with a focus on applications in structural biology. The field heavily relies on computerized geometric and visual representations of three-dimensional, complex, large, and time-varying molecular structures. The report presents a taxonomy that demonstrates which areas of molecular visualization have already been extensively investigated and where the field is currently heading. It discusses visualizations for molecular structures, strategies for efficient display regarding image quality and frame rate, covers different aspects of level of detail, and reviews visualizations illustrating the dynamic aspects of molecular simulation data. 
The report concludes with an outlook on promising and important research topics to enable further success in advancing the knowledge about interaction of molecular structures.", month = may, publisher = "The Eurographics Association", location = "Cagliari, Italy", event = "Eurographics Conference on Visualization (EuroVis) (2015)", editor = "R. Borgo and F. Ganovelli and I. Viola", booktitle = "Eurographics Conference on Visualization (EuroVis) - STARs", pages = "061--081", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Viola_Ivan_2015_VBS/", } @article{MATTAUSCH-2015-CHCRT, title = "CHC+RT: Coherent Hierarchical Culling for Ray Tracing", author = "Oliver Mattausch and Jir\'{i} Bittner and Alberto Jaspe and Enrico Gobbetti and Michael Wimmer and Renato Pajarola", year = "2015", abstract = "We propose a new technique for in-core and out-of-core GPU ray tracing using a generalization of hierarchical occlusion culling in the style of the CHC++ method. Our method exploits the rasterization pipeline and hardware occlusion queries in order to create coherent batches of work for localized shader-based ray tracing kernels. By combining hierarchies in both ray space and object space, the method is able to share intermediate traversal results among multiple rays. We exploit temporal coherence among similar ray sets between frames and also within the given frame. A suitable management of the current visibility state makes it possible to benefit from occlusion culling for less coherent ray types like diffuse reflections. Since large scenes are still a challenge for modern GPU ray tracers, our method is most useful for scenes with medium to high complexity, especially since our method inherently supports ray tracing highly complex scenes that do not fit in GPU memory. 
For in-core scenes our method is comparable to CUDA ray tracing and performs up to 5.94 × better than pure shader-based ray tracing.", month = may, journal = "Computer Graphics Forum", volume = "34", number = "2", issn = "1467-8659", pages = "537--548", keywords = "occlusion culling, ray tracing", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/MATTAUSCH-2015-CHCRT/", } @habilthesis{Matkovic_Kresimir_2015_, title = "Interactive Visual Analysis of Multi-Parameter Scientific Data", author = "Kresimir Matkovic", year = "2015", abstract = "Increasing complexity and a large number of control parameters make the design and understanding of modern engineering systems impossible without simulation. Advances in simulation technology and the ability to run multiple simulations with different sets of parameters pose new challenges for analysis techniques. The resulting data is often heterogeneous. A single data point does not contain scalars or vectors only, as usual. Instead, a single data point contains scalars, time series, and other types of mappings. Such a data model is common in many domains. Interactive visual analysis utilizes a tight feedback loop of computation/visualization and user interaction to facilitate knowledge discovery in complex datasets. Our research extends the visual analysis technology to challenging heterogeneous data, in particular to a combination of multivariate data and more complex data types, such as functions, for example. Furthermore, we focus on developing a structured model for interactive visual analysis which supports a synergetic combination of user interaction and computational analysis. The concept of height surfaces and function graphs is a proven and well developed mechanism for the analysis of a single mapping. The state of the art when a set of such mappings is analyzed suggested a use of different descriptors or aggregates in the analysis. 
Our research makes it possible to analyze a whole set of mappings (function graphs, or height surfaces, for example) while keeping the original data. We advance the interactive visual analysis to cope with complex scientific data. Most of the analysis techniques consider the data as a static source. Such an approach often hinders the analysis. We introduce a concept of interactive visual steering for simulation ensembles. We link the data generation and data exploration and analysis tasks in a single workflow. This makes it possible to tune and optimize complex systems having high dimensional parameter space and complex outputs.", month = may, URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Matkovic_Kresimir_2015_/", } @inproceedings{Bruckner_Stefan_2015_VAS, title = "Visual Analysis of Spatio-Temporal Data: Applications in Weather Forecasting", author = "Alexandra Diehl and Eduard Gr\"{o}ller and Stefan Bruckner", year = "2015", abstract = "Weather conditions affect multiple aspects of human life such as economy, safety, security, and social activities. For this reason, weather forecast plays a major role in society. Currently weather forecasts are based on Numerical Weather Prediction (NWP) models that generate a representation of the atmospheric flow. Interactive visualization of geo-spatial data has been widely used in order to facilitate the analysis of NWP models. This paper presents a visualization system for the analysis of spatio-temporal patterns in short-term weather forecasts. For this purpose, we provide an interactive visualization interface that guides users from simple visual overviews to more advanced visualization techniques. Our solution presents multiple views that include a timeline with geo-referenced maps, an integrated webmap view, a forecast operation tool, a curve-pattern selector, spatial filters, and a linked meteogram. 
Two key contributions of this work are the timeline with geo-referenced maps and the curve-pattern selector. The latter provides novel functionality that allows users to specify and search for meaningful patterns in the data. The visual interface of our solution allows users to detect both possible weather trends and errors in the weather forecast model. We illustrate the usage of our solution with a series of case studies that were designed and validated in collaboration with domain experts.", month = may, location = "Cagliari, Sardinia, Italy", booktitle = "Computer Graphics Forum", pages = "381--390", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Bruckner_Stefan_2015_VAS/", } @mastersthesis{Hochmayr_Manuel_2015_PSE, title = "Parameter Settings Exploration in Visualisation by Using a Semi-automatic Process", author = "Manuel Hochmayr", year = "2015", abstract = "Parameters and the process of setting them play a major role in the world of computer based visualisation, no matter whether it is a visualisation of information or of volume data. Finding suitable parameter values can take up most of the time in the visualisation process and users have to sensibly adjust a large number of parameters. Finding a useful parameter value distribution for achieving the desired visualisation result can be a cumbersome process which also depends on the user’s speed and experience. The purpose of this master’s thesis is to find a new and faster way to reach an appropriate parameter value distribution resulting in the desired visualisation. For this master’s thesis a prototype is developed which guides the user through a semi-automatic process of adjusting parameter values, which finally results in the desired visualisation of a scientific volume. Using this prototype enables the users to explore a large number of different parameter values within only a few iteration steps and a short amount of time. 
In order to do so we move away from the classic approach of setting parameters by adjusting sliders or combo boxes. The idea of this thesis is to combine concepts that were already used in volume visualisation into a prototype. Our main strategy is to present pre-rendered images of the volume with different parameter values to the users. The images that are closest to the target visualisation can be selected and new images, similar to these, are shown. After some iterations of this process the users should have reached a visualisation that meets their expectations. The basis of our approach is a spreadsheet user interface. Further we make use of the concept of high-level parameters, which are a combination of low-level parameters, like the specular exponent, to one single parameter, like contrast. The advantage of this concept is to have parameters which are more understandable to the users. We move away from the concept of displaying every single image in the spreadsheet interface, having multiple pages. Instead we use kMeans++ or DBScan with an automatic method to choose the distance parameter $\varepsilon$ to cluster the images by similarity. This results in only the cluster centres, which are images, being presented to the user in the spreadsheet interface for exploration. Additionally, Locally Linear Embedding (LLE) is used to map single images into a global coordinate system. As a second new approach we use the distance between the images within the coordinate system as a similarity measure for kMeans++ and DBScan. To provide a fast calculation of the Locally Linear Embedding, which includes the nearest neighbours, the distance matrix and the Eigenvalues of the images, we use CUDA. The selection process consists of two different steps: exploration and refinement. 
Depending on the cluster size of the selected image, a re-clustering of the sub cluster is done if the user has reached the end of the cluster due to having explored all images and not achieving the desired final image. Thus a new set with varied parameter values is created and used to render new images. In contrast to the initially created set, the newly created one takes into account the explored parameter values from the images chosen by the user. This means that the range - in which the values of the single parameters are varied - is limited by the minimum and maximum value the parameter received during the previously made exploration. Our tests showed that by combining all these techniques it is possible to explore many different parameter values for high-level parameters in a very short time, and to achieve visualisations equal to those created by setting parameter values manually. In a short test our approach enabled two users, who are rather inexperienced in the field of volume visualisation, to create similar visualisations in fewer steps than by setting parameter values manually.", month = may, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Hochmayr_Manuel_2015_PSE/", } @article{Froehler_Berhnard_2015_ESM, title = "Multimodal Visualization and Analysis of Spectral and XCT Data", author = "Bernhard Fr\"{o}hler and Artem Amirkhanov and Johann Kastner and Eduard Gr\"{o}ller and Christoph Heinzl", year = "2015", abstract = "An increasing number of industrial applications demand a comprehensive analysis of both structural and chemical composition. Typically, non-destructive testing techniques focus on either structural or chemical characterization but do not deliver both. 
3D X-Ray Computed Tomography (XCT) scans are well-suited for determining the internal and external structure of an object at high resolution. The attenuation value it delivers can however be the same or very similar for different materials. For a detailed chemical analysis XCT is therefore combined with spectral characterization techniques such as K-Edge Absorptiometry or X-ray Fluorescence Spectroscopy. In this paper, we are extending a previously introduced framework for visualization and analysis of specimens scanned with these two modalities in multiple ways: For better understanding the dependencies between the spectral energy levels, we propose Spectral Similarity Maps. Spectral Functional Boxplots visualize the statistical distribution of the spectral data. The Spectrum Explorer improves the analysis of specimens of unknown composition. We demonstrate the usefulness of our techniques on several use cases.", month = apr, journal = "Computer Graphics Forum", volume = "33", number = "3", note = "appeared in June 2014", issn = "2411-5428", pages = "91--100", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Froehler_Berhnard_2015_ESM/", } @inproceedings{mindek-2015-mc, title = "Automatized Summarization of Multiplayer Games", author = "Peter Mindek and Ladislav \v{C}mol\'{i}k and Ivan Viola and Eduard Gr\"{o}ller and Stefan Bruckner", year = "2015", abstract = "We present a novel method for creating automatized gameplay dramatization of multiplayer video games. The dramatization serves as a visual form of guidance through dynamic 3D scenes with multiple foci, typical for such games. Our goal is to convey interesting aspects of the gameplay by animated sequences creating a summary of events which occurred during the game. Our technique is based on processing many cameras, which we refer to as a flock of cameras, and events captured during the gameplay, which we organize into a so-called event graph. 
Each camera has a lifespan with a certain time interval and its parameters such as position or look-up vector are changing over time. Additionally, during its lifespan each camera is assigned an importance function, which is dependent on the significance of the structures that are being captured by the camera. The images captured by the cameras are composed into a single continuous video using a set of operators based on cinematographic effects. The sequence of operators is selected by traversing the event graph and looking for specific patterns corresponding to the respective operators. In this way, a large number of cameras can be processed to generate an informative visual story presenting the gameplay. Our compositing approach supports insets of camera views to account for several important cameras simultaneously. Additionally, we create seamless transitions between individual selected camera views in order to preserve temporal continuity, which helps the user to follow the virtual story of the gameplay.", month = apr, isbn = "978-80-223-3844-8", publisher = "Comenius University, Bratislava", location = "Smolenice, Slovakia", editor = "Joaquim Jorge, Luis Paulo Santos, Roman Durikovic", booktitle = "Proceedings of Spring Conference on Computer Graphics 2015", pages = "93--100", keywords = "storytelling, game visualization, animation", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/mindek-2015-mc/", } @techreport{ROEGNER-2015-IBR, title = "Image-based Reprojection Using a Non-local Means Algorithm", author = "Clemens R\"{o}gner and Michael Wimmer and Johannes Hanika and Carsten Dachsbacher", year = "2015", abstract = "We introduce an image-based approach to increase the framerate of image sequences generated with offline rendering algorithms. Our method handles in most cases reflections and refractions better than existing image-based temporal coherence techniques. 
The proposed technique is also more accurate than some image-based upsampling methods, because it calculates an individual result for each pixel. Our proposed algorithm takes a pair of frames and generates motion vectors for each pixel. This allows for adding a new frame between that pair and thus increasing the framerate. To find the motion vectors, we utilize the non-local means denoising algorithm, which determines the similarity of two pixels by their surrounding and reinterpret that similarity as the likelihood of movement from one pixel to the other. This is similar to what is done in video encoding to reduce file size, but in our case is done for each pixel individually instead of a block-wise approach, making our technique more accurate. Our method also improves on work in the field of real-time rendering. Such techniques use motion vectors, which are generated through knowledge about the movement of objects within the scene. This can lead to problems when the optical flow in an image sequence is not coherent with the objects' movement. Our method avoids those problems. Furthermore, previous work has shown, that the non-local means algorithm can be optimized for parallel execution, which significantly reduces the time to execute our proposed technique as well. 
", month = apr, number = "TR-186-2-15-02", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", note = "human contact: technical-report@cg.tuwien.ac.at", keywords = "optical flow, offline rendering, image reprojection, temporal upsampling, image-based rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/ROEGNER-2015-IBR/", } @misc{Ganuza_MJ_2015_AVe, title = "An\'{a}lisis visual en Geolog\'{i}a", author = " Mar\'{i}a Luj\'{a}n Ganuza and Gabriela Ferracutti and Maria Florencia Gargiulo and Silvia Mabel Castro and Kresimir Matkovic and Ernesto Bjerg and Eduard Gr\"{o}ller", year = "2015", abstract = "Los ge\'{o}logos usualmente trabajan con rocas que tienen edades oscilando entre pocos a miles de millones de a\~{n}os. Uno de los objetivos es tratar de reconstruir los ambientes geol\'{o}gicos donde se formaron las rocas y la sucesi\'{o}n de eventos que las afectaron desde su formaci\'{o}n a fin de comprender la evoluci\'{o}n geol\'{o}gica de la Tierra, identificar regiones donde se localizan dep\'{o}sitos minerales de inter\'{e}s econ\'{o}mico, recursos de combustibles, etc. Para alcanzar estos objetivos, recolectan informaci\'{o}n y muestras de rocas y minerales en el campo. En particular estos \'{u}ltimos son analizados en laboratorio con instrumentos para obtener datos geoqu\'{i}micos de minerales, como por ejemplo de los que conforman el grupo del espinelo. Dada la gran cantidad de datos generados, los cient\'{i}ficos se ven obligados a analizar grandes vol\'{u}menes de informaci\'{o}n para arribar a conclusiones basadas en datos objetivos. El flujo del trabajo de an\'{a}lisis de los ge\'{o}logos incluye el uso tedioso de varias herramientas y m\'{e}todos manuales relativamente complejos y propensos a errores para comparar diferentes gr\'{a}ficos y tablas. 
Para mejorarlo, los integrantes de este proyecto desarrollaron un framework de an\'{a}lisis visual de datos geol\'{o}gicos. Una realimentaci\'{o}n muy positiva de los expertos del dominio sobre \'{e}ste y el gran potencial de mejoramiento motiva esta l\'{i}nea de trabajo.", month = apr, publisher = " Red de Universidades con Carreras en Inform\'{a}tica (RedUNCI)", location = "Salta, Argentina", event = "Workshop de Investigadores en Ciencias de la Computaci\'{o}n (WICC 2015)", Conference date = "Poster presented at Workshop de Investigadores en Ciencias de la Computaci\'{o}n (WICC 2015) (2015-04-16--2015-04-17)", note = "1--5", pages = "1 – 5", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Ganuza_MJ_2015_AVe/", } @mastersthesis{Miao_Haichao_2015_VQC, title = "Visual Quantification of the Circle of Willis in Stroke Patients", author = "Haichao Miao", year = "2015", abstract = "This thesis presents a novel method for the visual quantification of cerebral arteries. The Circle of Willis (CoW) is an arterial structure that is responsible for the brain’s blood supply. Dysfunctions of this arterial circle can lead to strokes. The diagnosis of stroke patients is complex and relies on the radiologist’s expertise and the software tools used. These tools consist of very basic display methods of the volumetric data without support of state-of-the-art technologies in medical image processing and visualization. The goal of this thesis is to create an automated method for the standardized visualization of cerebral arteries in stroke patients in order to allow visual indications of problematic areas as well as straightforward inter-patient comparisons. Prior to the visualization, this work offers a solution for the extraction of the CoW from Time-of-Flight Magnetic Resonance Angiography (TOF-MRA) images. An enumeration technique for the labeling of the segments is therefore suggested. 
Furthermore, it proposes a method for the detection of the CoW’s main supplying arteries by analyzing the coronal, sagittal and transverse image planes of the volume. This work gives a comprehensive account of the entire pipeline that is required to extract the arteries in the CoW and to build a model for the standardized visualization. The final goal of this thesis is to create an effective display of the arteries based on a radial tree layout. The feasibility of the visual quantification method is tested in a study of 63 TOF-MRAs. With the proposed methodology applied to the subjects, the results were compared to the findings from radiologists. The obtained results demonstrate that the proposed techniques are effective in detecting the arteries of the CoW. Finally, we focused our methods on the identification of the main arteries.", month = apr, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Miao_Haichao_2015_VQC/", } @inproceedings{lemuzic_2015_timelapse, title = "Illustrative Timelapse: A Technique for Illustrative Visualization of Particle Simulations on the Mesoscale Level", author = "Mathieu Le Muzic and Manuela Waldner and Julius Parulek and Ivan Viola", year = "2015", abstract = "Animated movies are a popular way to communicate complex phenomena in cell biology to the broad audience. Animation artists apply sophisticated illustration techniques to communicate a story, while trying to maintain a realistic representation of a complex dynamic environment. Since such hand-crafted animations are timeconsuming and cost-intensive to create, our goal is to formalize illustration techniques used by artists to facilitate the automatic creation of visualizations generated from mesoscale particle-based molecular simulations. 
Our technique Illustrative Timelapse supports visual exploration of complex biochemical processes in dynamic environments by (1) seamless temporal zooming to observe phenomena in different temporal resolutions, (2) visual abstraction of molecular trajectories to ensure that observers are able to visually follow the main actors, (3) increased visual focus on events of interest, and (4) lens effects to preserve a realistic representation of the environment in the context. Results from a first user study indicate that visual abstraction of trajectories improves the ability to follow a story and is also appreciated by users. Lens effects increased the perceived amount of molecular motion in the environment while trading off traceability of individual molecules.", month = apr, publisher = "IEEE", organization = "8th IEEE Pacific Visualization Symposium (PacificVis 2015)", location = "Zijingang Campus, Zhejiang University, Hangzhou, China", booktitle = "Visualization Symposium (PacificVis), 2015 IEEE Pacific", pages = "247--254", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/lemuzic_2015_timelapse/", } @phdthesis{Reh_Andreas_VoP, title = "Visualization of Porosity in Carbon Fiber Reinforced Polymers", author = "Andreas Reh", year = "2015", abstract = "Industrial research is continuously increasing efforts in designing new-tailored light-weight materials in order to meet the high demands regarding efficiency, environment, safety as well as comfort. Especially in the aeronautics industry a high demand for advanced composite materials is observable. The new generations of aircrafts are made of more than 50 % of these novel composite materials. Carbon fiber reinforced polymers (CFRPs) are currently considered as the most promising candidate since this material is outperforming the majority of conventional materials. As a result of the manufacturing process this material tends to have pores inside. Pores in the material are typically inclusions of air. 
As they have an impact on the mechanical properties of the component, their determination and evaluation is an important task in quality control and a particular challenge for non-destructive testing (NDT) practitioners. Besides the characterization of individual pores, their spatial distribution in the tested component is a relevant factor. For example, a high concentration of pores in certain regions leads to different material characteristics as compared to a homogenous distribution of the pores. This work is based on 3D X-ray Computed Tomography (XCT) to gain new insight into CFRP components. Based on domain experts’ questions, specific tasks were derived. Besides the quantitative porosity determination, the main visualization tasks are: giving a fast porosity overview, exploring the individual pores, and tracking features over time based on XCT time-series. In this thesis, three novel visual analysis tools are presented to solve these tasks. To enhance the evaluation workflow for non-destructive testing (NDT) practitioners, a visualization pipeline for the interactive exploration and visual analysis of CFRP specimens is developed. After the calculation of local pore properties, i.e., volume, surface, extents and shape factors, a drill-down approach is employed to explore pores in a CFRP specimen. Therefore Porosity Maps (PM) are presented to allow for a fast porosity overview and selecting a region of interest. Pores in this region may be filtered and visualized with a parallel-coordinates selection. Furthermore a novel visualization technique which allows for a fast porosity overview and exploration of pores by focusing more on their shapes is proposed. In this method, all objects (pores) are clustered into a Mean Object (MObject). To explore this MObject, the visualization of mean object sets (MObject Sets) in a radial and a parallel alignment is introduced. 
By selecting a specific property such as the volume or shape factor and the desired number of classes, a MObject is split up into sub-classes. With this approach, intended classifications and visualizations of MObjects may be explored by the user. These representative MObjects may be exported as volumetric datasets to serve as input for successive calculations and simulations. For an overview of the pore properties in the dataset local MObjects are calculated in a grid and combined with a color-coded homogeneity visualization. Both approaches were evaluated with real-world CFRP specimens. To go one step further, time as a fourth dimension is added to analyze a process over time, e.g., how the features evolve and form over time. Therefore features in a series of XCT scans are tracked with the Fuzzy Feature Tracking approach and are then visualized together with the extracted events in multiple linked-views, each emphasizing individual aspects of the 4D time-series data. Spatial feature information, global temporal overview, and global temporal evolution of how the features are tracked and connected over the whole time-series are covered with the visual-analysis system. 
The results and advantages of the Fuzzy Feature Tracking tool are demonstrated using various real-world applications, such as AlSiC alloys under thermal load or wood shrinkage analyses.", month = mar, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Carbon Fiber Reinforced Polymers, MObjects, Porosity, Visual Analysis, Visualization, Industrial Computed Tomography", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Reh_Andreas_VoP/", } @inproceedings{WEBER-2015-PRA, title = "Parallel Reyes-style Adaptive Subdivision with Bounded Memory Usage", author = "Thomas Weber and Michael Wimmer and John Owens", year = "2015", abstract = "Recent advances in graphics hardware have made it a desirable goal to implement the Reyes algorithm on current graphics cards. One key component in this algorithm is the bound-and-split phase, where surface patches are recursively split until they are smaller than a given screen-space bound. While this operation has been successfully parallelized for execution on the GPU using a breadth-first traversal, the resulting implementations are limited by their unpredictable worst-case memory consumption and high global memory bandwidth utilization. In this paper, we propose an alternate strategy that allows limiting the amount of necessary memory by controlling the number of assigned worker threads. The result is an implementation that scales to the performance of the breadth-first approach while offering three advantages: significantly decreased memory usage, a smooth and predictable tradeoff between memory usage and performance, and increased locality for surface processing. 
This allows us to render scenes that would require too much memory to be processed by the breadth-first method.", month = feb, isbn = "978-1-4503-3392-4", publisher = "ACM", organization = "ACM", location = "San Francisco, CA", booktitle = "Proceedings of the 19th Symposium on Interactive 3D Graphics and Games (i3D 2015)", pages = "39--45", keywords = "micro-rasterization", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/WEBER-2015-PRA/", } @misc{KREUZER-2015-DPA, title = "Depixelizing Pixel Art in Real-Time", author = "Felix Kreuzer and Johannes Kopf and Michael Wimmer", year = "2015", abstract = "Pixel art was frequently employed in games of the 90s and earlier. On today's large and high-resolution displays, pixel art looks blocky. Recently, an algorithm was introduced to create a smooth, resolution-independent vector representation from pixel art. However, the algorithm is far too slow for interactive use, for example in a game. This poster presents an efficient implementation of the algorithm on the GPU, so that it runs at real-time rates and can be incorporated into current game emulators. 
Extended Abstract: http://dl.acm.org/citation.cfm?id=2721395", month = feb, publisher = "ACM New York, NY, USA", location = "San Francisco, CA", isbn = "978-1-4503-3392-4", event = "19th Symposium on Interactive 3D Graphics and Games", booktitle = "Proceedings of the 19th Symposium on Interactive 3D Graphics and Games", Conference date = "Poster presented at 19th Symposium on Interactive 3D Graphics and Games (2015-02-27--2015-03-01)", note = "130--130", pages = "130 – 130", keywords = "image processing, depixelizing, pixel art", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/KREUZER-2015-DPA/", } @mastersthesis{Wimmer_Maria_2015_SAS, title = "Semi-Automatic Spine Labeling on T1- and T2-weighted MRI Volume Data", author = "Maria Wimmer", year = "2015", abstract = "In medical diagnosis, the spine is often a frame of reference and so helps to localize diseases (e.g. tumors) in the human body. Automated spine labeling approaches are in demand, in order to replace time consuming, manual labeling by a radiologist. Different approaches have already been proposed in the literature, mainly for Computed Tomography (CT) and Magnetic Resonance Imaging (MRI) data. While CT scans exhibit a generalized intensity scale, MR images come with a high variability within the data and hence the tissues. Several factors influence the appearance of vertebrae and intervertebral disks in MRI data: different scanners, changes of acquisition parameters, magnetic field inhomogeneities or age-related, structural changes of the spinal anatomy. These factors compound the development of semi- and fully automatic spine labeling systems. The main goal of this thesis is to overcome these variations and find a generalized representation for different kinds of MR data. Furthermore, it aims for a semi-automatic labeling approach on these preprocessed scans where the user has to provide an initial click. 
Entropy-optimized Texture Models are applied to normalize the data to a standardized, reduced intensity scale. With Probabilistic Boosting Trees, intervertebral disk feature points are detected, whereby the disk center is selected with a Shape Particle Filter. The results achieved with the proposed pipeline are promising in terms of data normalization, timing and labeling accuracy. With a mean overall processing time of 6.0 s for normalizing and labeling a dataset (0.8 s per disk), the algorithm achieves a precision of 92.4% (recall = 86.8%). Using a higher resolution of the data for disk detection (average timing of 1.6 s per disk resp. 12.4 s per dataset), reduces the number of missed disk candidates and hence increases the recall to 91.7% (with a precision of 91.9%).", month = jan, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Wimmer_Maria_2015_SAS/", } @mastersthesis{Freude_MSc, title = "Extending Separable Subsurface Scattering to Arbitrary Materials", author = "Christian Freude", year = "2015", abstract = "This thesis proposes extensions for the Separable Subsurface Scattering algorithm to support arbitrary materials. Four separable (rank-1) kernel models for the approximation of physically based diffuse reflectance profiles are presented. Each model offers different approximation quality and controllability. The first two models are based on singular value decomposition and a custom analytic pre-integration scheme. They enable fast deterministic kernel computation and provide fixed-quality solutions. Two additional parametrized models are based on automatic and manual optimization and provide more control over the approximation quality but are more time-consuming to generate. Higher rank approximations can be computed using the approach based on singular value decomposition. 
All four kernel models are used to compute approximations for physically measured diffuse reflectance profiles of different materials and tested using several special-case irradiance signals and complex proof-of-concept scenes. The results are compared to the state of the art in realtime rendering of subsurface scattering, showing comparable approximation quality at lower computational cost. The proposed extensions enable rendering of physically based subsurface scattering for arbitrary materials and dynamic scenes in real time. https://www.youtube.com/watch?v=P0Tkr4HaIVk", month = jan, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "subsurface scattering, real-time", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Freude_MSc/", } @mastersthesis{Beham_Michael_PSC, title = "Parameter Spaces of Cups: Cluster-based Exploration of a Geometry Generator", author = "Michael Beham", year = "2015", abstract = "Geometry generators are commonly used in video games and evaluation systems for computer vision to create geometric shapes such as terrains, vegetation or airplanes. The parameters of the generator are often sampled automatically which can lead to many similar or unwanted objects. In this thesis, we propose a novel visual exploration approach that combines the abstract parameter space of the generator with the resulting geometric shapes in a composite visualization. Similar 3D shapes are first grouped using hierarchical clustering and then displayed in an illustrative parallel coordinates or scatterplot matrix visualization. This helps the user to study the sensitivity of the generator with respect to its parameter space and to identify invalid regions. Starting from a compact overview representation, the user can iteratively drill-down into local shape differences by clicking on the respective clusters. 
Additionally, a linked radial tree gives an overview of the cluster hierarchy and enables the user to manually split or merge clusters. We evaluate our approach by exploring the parameter space of a cup generator and provide feedback from domain experts. Kurzfassung: Geometriegeneratoren werden h\"{a}ufig in Videospielen und Evaluierungssystemen f\"{u}r Maschinelles Sehen eingesetzt, um 3D Geometrie wie Terrains, Vegetation und Flugzeuge zu erstellen. Die Generatoren werden durch Parameter gesteuert. Diese werden oft automatisch abgetastet, um verschiedenste Variationen eines Objektes zu erzeugen. Dies f\"{u}hrt aber oft zu sehr \"{a}hnlichen oder unerw\"{u}nschten Objekten. In dieser Diplomarbeit wird ein neues Visualisierungssystem vorgestellt, welches die Analyse des abstrakten Parameterraumes eines Geometriegenerators gemeinsam mit den resultierenden Geometrieobjekten mithilfe neuer Visualisierungen darstellt. \"{A}hnliche Objekte werden zun\"{a}chst mittels Hierarchischem Clustering gruppiert und anschlie{\ss}end in illustrativen Parallelen Koordinaten dargestellt. Dies erm\"{o}glicht dem/der BenutzerIn die Sensitivit\"{a}t einzelner Parameter zu analysieren und Regionen im Parameterraum zu identifizieren, welche unerw\"{u}nschte Ergebnisse liefern. Die Visualisierung startet mit einer kompakten \"{u}bersichtlichen Darstellung und der/die BenutzerIn kann interaktiv die lokalen Unterschiede durch klicken auf den gew\"{u}nschten Cluster analysieren. Zur erh\"{o}hten \"{U}bersichtlichkeit wird auch eine radiale Baumdarstellung der Cluster angeboten. Desweiteren k\"{o}nnen Cluster vereinigt und gesplittet werden. Das System wurde von Fachleuten evaluiert, wobei ein Tassengenerator analysiert wurde. 
", month = jan, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Beham_Michael_PSC/", } @studentproject{audioflow, title = "AudioFlow", author = "Matthias Adorjan", year = "2015", abstract = "In this work we present a way to use flow datasets in an artistic way. The main goal of our work is to analyze how flow data can influence audio playback. We use values from existing flow datasets to control the playback of an audio stream in our developed music player called AudioFlow. This application allows the user to extract flow data values from a loaded dataset with the help of simple drag and drop gestures. A rendered flow visualization makes it possible to select specific flow channel values. The extracted data values are then used to change the volume and playback speed of a background audio stream. Furthermore, it is possible to introduce a fading effect, which repeatedly decreases and increases the volume of the played audio file.", keywords = "media interaction, audio manipulation, flow visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/audioflow/", } @talk{Purg2015-a, title = "Was ist denn Farbe eigentlich?", author = "Werner Purgathofer", year = "2015", event = "Farben?-Farben!", location = "Kunst-Volkshochschule Wien", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Purg2015-a/", } @talk{Purg2015-b, title = "Visual Computing in \"{O}sterreich", author = "Werner Purgathofer", year = "2015", event = "Medienseminar Informatik der Plattform informatik_austria", location = "Wien", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Purg2015-b/", } @talk{Purg2015-c, title = "Computer Graphics in Real-World Applications", author = "Werner Purgathofer", year = "2015", event = "Visual Computing Workshop at the OCG-Jahrestagung", location = "Wien", URL 
= "https://www.cg.tuwien.ac.at/research/publications/2015/Purg2015-c/", } @talk{Purg2015-d, title = "Industrial Use of Mixed Reality in VRVis Projects", author = "Werner Purgathofer", year = "2015", event = "VISIC'2015 Int'l Symposium on Virtual Reality, Augmented Reality and Visual Computing Application Technology", location = "Beijing, China", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Purg2015-d/", } @mastersthesis{Adorjan-2015, title = "The OpenSFM Database", author = "Matthias Adorjan", year = "2015", abstract = "Besides using high-cost laser scanning equipment to capture large point clouds for topographical or architectural purposes, nowadays other, more affordable approaches exist. Structure-from-motion (SfM) in combination with multi-view stereo (MVS) is such a low-cost photogrammetric method used to generate large point datasets. It refers to the process of estimating three-dimensional structures out of two-dimensional image sequences. These sequences can even be captured with conventional consumer-grade digital cameras. In our work we aim to a establish a free and fully accessible structure-from-motion system, based on the idea of collaborative projects like OpenStreetMap. Our client-server system, called OpenSfM, consists of a web front-end which lets the user explore, upload and edit SfM-datasets and a back-end that answers client requests and processes the uploaded data and stores it in a database. The front-end is a virtual tourism client which allows the exploration of georeferenced point clouds together with their corresponding SfM-data like camera parameters and photos. The information is rendered in the context of an interactive virtual globe. An upload functionality makes it possible to integrate new SfM-datasets into the system and improve or extend existing datasets by adding images that fill missing areas of the affected point cloud. Furthermore, an edit mode allows the correction of georeferencing or reconstruction errors. 
On the other side the back-end evaluates the uploaded information and generates georeferenced point datasets using a state-of-the-art SfM engine and the GPS data stored in the uploaded images. The generated point clouds are preprocessed, such that they can be used by the front-end’s point cloud renderer. After that, they are stored together with the uploaded images and SfM parameters in the underlying database. On the whole, our system allows the gathering of SfM-datasets that represent different sights or landmarks, but also just locally famous buildings, placed all over the world. Those datasets can be explored in an interactive way by every user who accesses the virtual tourism client using a web browser.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Adorjan-2015/", } @article{raidou_vis15, title = "Orientation-Enhanced Parallel Coordinate Plots", author = "Renata Raidou and Martin Eisemann and Marcel Breeuwer and Elmar Eisemann and Anna Vilanova i Bartroli", year = "2015", journal = "IEEE transactions on visualization and computer graphics", volume = "22", number = "1", pages = "589--598", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/raidou_vis15/", } @phdthesis{mindek-thesis, title = "Interactive Integrated Exploration and Management of Visualization Parameters", author = "Peter Mindek", year = "2015", abstract = "Visualization algorithms are parameterized to offer universality in terms of handling various data types, showing different aspects of the visualized data, or producing results useful for domain experts from different fields. Hence, input parameters are an important aspect of the visualization process. Their exploration and management are tasks which enable the visualization reusability, portability, and interdisciplinary communication. 
With increasing availability of visualization systems, which are suitable for a great variety of tasks, their complexity increases as well. This usually involves many input parameters necessary for the meaningful visualization of data. Multiple input parameters form parameter spaces which are too large to be explored by brute-force. Knowing the properties of a parameter space is often beneficial for improving data visualization. Therefore, it is important for domain experts utilizing data visualization to have tools for automatic parameter specification and for aiding the manual parameter setting. In this thesis, we review existing approaches for parameter-space visualization, exploration, and management. These approaches are used with a great variety of underlying algorithms. We focus on their applicability to visualization algorithms. We propose three methods solving specific problems arising from the fact that the output of a visualization algorithm is an image, which is challenging to process automatically and often needs to be analyzed by a human. First, we propose a method for the exploration of parameter-spaces of visualization algorithms. The method is used to understand effects of combinations of parameters and parts of the internal structure of the visualization algorithms on the final image result. The exploration is carried out by specifying semantics for localized parts of the visualization images in the form of positive and negative examples influenced by a set of input parameters or parts of the visualization algorithm itself. After specifying the localized semantics, global effects of the specified components of the visualization algorithm can be observed. The method itself is independent from the underlying algorithm. Subsequently, we present a method for managing image-space selections in visualizations and automatically link them with the context in which they were created. 
The context is described by the values of the visualization parameters influencing the output image. The method contains a mechanism for linking additional views to the selections, allowing the user an effective management of the visualization parameters whose effects are localized to certain areas of the visualizations. We present various applications for the method, as well as an implementation in the form of a library, which is ready to be used in existing visualization systems. Our third method is designed to integrate dynamic parameters stored during a multiplayer video game session by the individual participating players. For each player, the changing parameter values of the game describe their view of the gameplay. Integrating these multiple views into a single continuous visual narrative provides means for effective summarization of gameplays, useful for entertainment, or even gameplay analysis purposes by semi-professional or professional players. We demonstrate the utility of our approach on an existing video game by producing a gameplay summary of a multiplayer game session. The proposed method opens possibilities for further research in the areas of storytelling, or at a more abstract level, parameter integration for visual computing algorithms. ", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/mindek-thesis/", } @studentproject{2015-image-coll, title = "Organisation of Image Collections", author = "Martina Rasch", year = "2015", abstract = "On the internet there are a lot of image collections, for example on Flickr or Imgur. Many of them let any user upload photographs or images. As a result, they have a large number of images, for instance Flickr had more than 10 billion image uploads in 2014 alone. This makes it difficult to find specific images or images with certain attributes. 
Therefore, we propose a new method for searching pictures in image collections.", keywords = "pivot view, image collections", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/2015-image-coll/", } @xmascard{freude-2015-xmc, title = "X-Mas Card 2015", author = "Christian Freude", year = "2015", abstract = "This greeting card shows the branch of a Christmas tree which is decorated with a candle as well as the logos of our institute and the TU Wien. For a realistic representation of Subsurface Scattering in the candle and the pine needles, a method called Separable Subsurface Scattering was used, which was developed in cooperation with our institute. This image was calculated in real time, with the additional use of HDR rendering and depth-of-field. Only the frost effect and the flame of the candle were added later using GIMP. The images on the right show additional examples of the used Subsurface Scattering technique. Diese Gru{\ss}karte zeigt den Ast eines Christbaums welcher mit einer Kerze sowie den Logos des Instituts und der TU Wien dekoriert ist. Um eine m\"{o}glichst realistische Darstellung von Subsurface Scattering in der Kerze sowie den Tannennadeln zu erzielen, wurde eine Methode namens Separable Subsurface Scattering verwendet, welche in Kooperation mit unserem Institut entwickelt wurde. Dieses Bild wurde in Echtzeit berechnet, unter zus\"{a}tzlicher Verwendung von HDR Rendering und Tiefenunsch\"{a}rfe. Lediglich der Frost-Effekt sowie die Flamme der Kerze wurden nachtr\"{a}glich mittels GIMP hinzugef\"{u}gt. Die Bilder auf der rechten Seite zeigen weitere Beispiele der verwendeten Subsurface Scattering Technik.", keywords = "X-Mas Card", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/freude-2015-xmc/", } @talk{WIMMER-2015-CGFC, title = "Real-Time Computer Graphics and the Future of CAAD", author = "Michael Wimmer", year = "2015", abstract = "The gap between real-time and non-real-time computer graphics is closing. 
Current approaches are able to render large amounts of geometry at interactive rates while at the same time providing an unprecedented level of feedback and support for the user. Given these developments, it will be possible to bridge the classical divide between CAAD modelling and visualization that is still pre-eminent in the architectural world. This keynote talk will take one step into this direction, presenting both recent research conducted at TU Wien's Rendering Group and the computer graphics community world-wide.", event = "33rd Annual eCAADe conference", location = "Vienna, Austria", keywords = "computer graphics", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/WIMMER-2015-CGFC/", } @talk{WIMMER-2015-CGMC, title = "Computer Graphics Meets Computational Design", author = "Michael Wimmer", year = "2015", abstract = "In this talk, I will report on recent advancements in Computer Graphics, which will be of great interest for next-generation computational design tools. I will present methods for modeling from images, modeling by examples and multiple examples, but also procedural modeling, modeling of physical behavior and light transport, all recently developed in our group. The common rationale behind our research is that we exploit real-time processing power and computer graphics algorithms to enable interactive computational design tools that allow short feedback loops in design processes.", event = "VGS Invited Talks Series", location = "Brno University of Technology, Czech Republic", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/WIMMER-2015-CGMC/", } @bachelorsthesis{gostler-2015-3dp, title = "Denoising Medical Surface Meshes for 3D-Printing", author = "Anna Gostler", year = "2015", abstract = "3D-printed anatomical models obtained from medical volume data (CT, MRI, ultrasound) can be used for surgery-planning, diagnosis, fabrication of implants and education. 
Surface meshes that are extracted from real-world data typically suffer from noise, as well as other types of artifacts. This thesis compares different common denoising algorithms and assesses their applicability to medical surface meshes, particularly with regard to subsequent 3D-printing. Additionally this thesis proposes an approach to detect and remove long, thin artifacts, which are commonly found in medical data. If they are removed before smoothing, deformations in the smoothed mesh can be prevented and fewer support structures are needed for 3D-printing.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/gostler-2015-3dp/", } @article{raidou_EuroVis15, title = "Visual analytics for the exploration of tumor tissue characterization", author = "Renata Raidou and Uulke A van der Heide and Cuong V Dinh and Ghazaleh Ghobadi and Jesper Follsted Kallehauge and Marcel Breeuwer and Anna Vilanova i Bartroli", year = "2015", abstract = "Tumors are heterogeneous tissues consisting of multiple regions with distinct characteristics. Characterization of these intra-tumor regions can improve patient diagnosis and enable a better targeted treatment. Ideally, tissue characterization could be performed non-invasively, using medical imaging data, to derive per voxel a number of features, indicative of tissue properties. However, the high dimensionality and complexity of this imaging-derived feature space is prohibiting for easy exploration and analysis - especially when clinical researchers require to associate observations from the feature space to other reference data, e.g., features derived from histopathological data. Currently, the exploratory approach used in clinical research consists of juxtaposing these data, visually comparing them and mentally reconstructing their relationships. 
This is a time consuming and tedious process, from which it is difficult to obtain the required insight. We propose a visual tool for: (1) easy exploration and visual analysis of the feature space of imaging-derived tissue characteristics and (2) knowledge discovery and hypothesis generation and confirmation, with respect to reference data used in clinical research. We employ, as central view, a 2D embedding of the imaging-derived features. Multiple linked interactive views provide functionality for the exploration and analysis of the local structure of the feature space, enabling linking to patient anatomy and clinical reference data. We performed an initial evaluation with ten clinical researchers. All participants agreed that, unlike current practice, the proposed visual tool enables them to identify, explore and analyze heterogeneous intra-tumor regions and particularly, to generate and confirm hypotheses, with respect to clinical reference data.", journal = "Computer Graphics Forum", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/raidou_EuroVis15/", } @talk{Auzinger-2015-IST, title = "Prefiltered Anti-Aliasing on Parallel Hardware", author = "Thomas Auzinger", year = "2015", event = "Seminar Talk", location = "IST Austria", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Auzinger-2015-IST/", } @phdthesis{auzinger-2015-phd, title = "Sampled and Prefiltered Anti-Aliasing on Parallel Hardware", author = "Thomas Auzinger", year = "2015", abstract = "A fundamental task in computer graphics is the generation of two-dimensional images. Prominent examples are the conversion of text or three-dimensional scenes to formats that can be presented on a raster display. Such a conversion process - often referred to as rasterization or sampling - underlies inherent limitations due to the nature of the output format. 
This causes not only a loss of information in the rasterization result, which manifests as reduced image sharpness, but also causes corruption of the retained information in form of aliasing artifacts. Commonly observed examples in the final image are staircase artifacts along object silhouettes or Moire-like patterns. The main focus of this thesis is on the effective removal of such artifacts - a process that is generally referred to as anti-aliasing. This is achieved by removing the offending input information in a filtering step during rasterization. In this thesis, we present different approaches that either minimize computational effort or emphasize output quality. We follow the former objective in the context of an applied scenario from medical visualization. There, we support the investigation of the interiors of blood vessels in complex arrangements by allowing for unrestricted view orientation. Occlusions of overlapping blood vessels are minimized by automatically generating cut-aways with the help of an occlusion cost function. Furthermore, we allow for suitable extensions of the vessel cuts into the surrounding tissue. Utilizing a level of detail approach, these cuts are gradually smoothed with increasing distance from their respective vessels. Since interactive response is a strong requirement for a medical application, we employ fast sample-based anti-aliasing methods in the form of visibility sampling, shading supersampling, and post-process filtering. We then take a step back and develop the theoretical foundations for anti-aliasing methods that follow the second objective of providing the highest degree of output quality. As the main contribution in this context, we present exact anti-aliasing in the form of prefiltering. By computing closed-form solutions of the filter convolution integrals in the continuous domain, we circumvent any issues that are caused by numerical integration and provide mathematically correct results. 
Together with a parallel hidden-surface elimination, which removes all occluded object parts when rasterizing three-dimensional scenes, we present a ground-truth solution for this setting with exact anti-aliasing. We allow for complex illumination models and perspective-correct shading by combining visibility prefiltering with shading sampling and generate a ground-truth solution for multisampling anti-aliasing. All our aforementioned methods exhibit highly parallel workloads. Throughout the thesis, we present their mapping to massively parallel hardware architectures in the form of graphics processing units. Since our approaches do not map to conventional graphics pipelines, we implement our approach using general-purpose computing concepts. This results in decreased runtime of our methods and makes all of them interactive.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/auzinger-2015-phd/", } @bachelorsthesis{bayer-2015-gapaot, title = "``Gangsters and Pranksters: The Art of Thievery'': A Multiplayer Stealth Game in the Context of the Generation of Dynamic Replay Videos", author = "Alexander Bayer", year = "2015", abstract = "We wanted to create a game that is fully compatible with the surveillance system created by Peter Mindek and his colleagues. The game is a stealth based multiplayer game where you have to steal precious objects from a museum and avoid the watchmen that want to catch you. 
After a round the game saves the replay data and the data for the event hierarchy that can be used by the surveillance system.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/bayer-2015-gapaot/", } @talk{mindek-2015-mctalk, title = "Automatized Summarization of Multiplayer Games", author = "Peter Mindek", year = "2015", abstract = "We present a novel method for creating automatized gameplay dramatization of multiplayer video games. The dramatization serves as a visual form of guidance through dynamic 3D scenes with multiple foci, typical for such games. Our goal is to convey interesting aspects of the gameplay by animated sequences creating a summary of events which occurred during the game. Our technique is based on processing many cameras, which we refer to as a flock of cameras, and events captured during the gameplay, which we organize into a so-called event graph. Each camera has a lifespan with a certain time interval and its parameters such as position or look-up vector are changing over time. Additionally, during its lifespan each camera is assigned an importance function, which is dependent on the significance of the structures that are being captured by the camera. The images captured by the cameras are composed into a single continuous video using a set of operators based on cinematographic effects. The sequence of operators is selected by traversing the event graph and looking for specific patterns corresponding to the respective operators. In this way, a large number of cameras can be processed to generate an informative visual story presenting the gameplay. Our compositing approach supports insets of camera views to account for several important cameras simultaneously. 
Additionally, we create seamless transitions between individual selected camera views in order to preserve temporal continuity, which helps the user to follow the virtual story of the gameplay.", event = "Numerical Geometry Seminar", location = "Comenius University in Bratislava", keywords = "Animation, Storytelling, Game Visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/mindek-2015-mctalk/", } @talk{Purgathofer-2015-WA, title = "{\"U}berblick Visual Computing: Teilbereiche und Anwendungsfelder", author = "Werner Purgathofer", year = "2015", event = "Business Treff ``Visual Computing''", location = "Wien", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Purgathofer-2015-WA/", } @talk{musialski-2015-ista, title = "Reduced-Order Shape Optimization Using Offset Surfaces", author = "Przemyslaw Musialski", year = "2015", abstract = "Given the 2-manifold surface of a 3d object, we propose a novel method for the computation of an offset surface with varying thickness such that the solid volume between the surface and its offset satisfies a set of prescribed constraints and at the same time minimizes a given objective functional. Since the constraints as well as the objective functional can easily be adjusted to specific application requirements, our method provides a flexible and powerful tool for shape optimization. We use manifold harmonics to derive a reduced-order formulation of the optimization problem, which guarantees a smooth offset surface and speeds up the computation independently from the input mesh resolution without affecting the quality of the result. The constrained optimization problem can be solved in a numerically robust manner with commodity solvers. Furthermore, the method allows simultaneously optimizing an inner and an outer offset in order to increase the degrees of freedom. 
We demonstrate our method in a number of examples where we control the physical mass properties of rigid objects for the purpose of 3d printing. Link: https://ist.ac.at/events/symposia-conferences/2015/siggraph-nano-ist-austria/date/433/", event = "SIGGRAPH Nano @ Institute of Science and Technology (IST), Austria", location = "Institute of Science and Technology (IST), Austria", keywords = "geometry processing, digital fabrication, shape optimization", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/musialski-2015-ista/", } @talk{musialski-2015-pixel, title = "Computational Design for Consumer-Level Fabrication", author = "Przemyslaw Musialski", year = "2015", abstract = "Digital fabrication is currently a rapidly emerging field in several science and engineering fields. Especially the technique of additive manufacturing (AM), commonly referred to as 3d printing, is a fast growing area. While this technology is in essence not new, the recent expiration of key patents for 3d-printing technology led to a break-through, which already also arrived at the consumer-level 3d-printers market. This development brings about novel requirements on digital model design. Traditional fields used to deal with digital manufacturing, like rapid prototyping, material sciences, or industrial engineering learned to deal with existing CAD-software in order to create digital content. However, the expansion of the digital fabrication technology into everybody’s homes and offices brings about the demand for novel paradigms of consumer-level computational design. This novel personal fabrication aims at bridging the gap between the still advancing digital domain and the “good old” physical world. In this talk I will give an overview of current development of such computational design in the field of computer graphics and provide details on an example application. 
Link: http://www.pixelvienna.com/10/event/talks#musialski", event = "PIXEL X - Annual Austrian Computer Graphics and Animation Conference, Vienna, Austria", location = "PIXEL X, TU Wien, Karlsplatz 1, Vienna, Austria", keywords = "digital fabrication, geometry processing, shape optimization, computational design", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/musialski-2015-pixel/", } @talk{musialski-2015-vrvis, title = "Shape Optimization for Consumer-Level 3D Printing", author = "Przemyslaw Musialski", year = "2015", abstract = "Traditionally, 3d modelling in computer graphics deals with the geometric and visual aspects of 3d shapes. On the other hand, due to the growing capabilities of personal digital fabrication technology and its spread into offices and homes, 3d models are increasingly entering the physical world. Therefore, the physical properties of 3d models come into focus. For example, 3d-printed models should be able to stand balanced in a desired pose without toppling over, or should react in certain ways when external forces are applied, like spinning the object. It is thus desirable to have methods that allow the user to specify the intended physical properties of an object in addition to its 3d geometry, and which automatically take these properties into account when generating a specification for a 3d printer. In this talk I will give an introduction to such computational design problems and introduce our novel method for shape optimization based on offset surfaces. 
Link: http://www.vrvis.at/research/events/vrvisforum/vrvisforum-123-29.10.2015", event = "VRVisForum #123", location = "VRVis, Techgate Vienna, Austria", keywords = "shape optimization, geometry processing, digital fabrication", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/musialski-2015-vrvis/", } @mastersthesis{textures-3d-printing, title = "Projector-Based Textures for 3D-Printed Models - Tangible Molecular Visualization", author = "Simon Brenner", year = "2015", abstract = "The now widely available 3D-printing technology enables structural molecular biologists to easily produce tangible models of large and complex molecules, which can aid them in understanding their spatial structure. Those models, however, are static and often monochrome, therefore their information content cannot compete with existing screenbased visualization solutions. Following the paradigm of spatial augmented reality, we present an approach to dynamically visualize molecular properties directly on the surface of 3D-printed tangible models, using a digital projector. We developed a prototype system consisting of hardware and software, that enables the tracking of the tangible model and the rendering of colorcoded molecular properties, which are then projected onto the tangible surface. Using knowledge about the geometry of the molecular model, the optical properties of the digital projector and the exact spatial relation between projector and model, the rendered projections are updated in real-time, such that they stay registered with the tangible model during user interaction. 
We evaluated the usability and potential applicability of the developed system by collecting feedback from domain experts from the fields of biochemistry and molecular biology.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "augmented reality, 3D textures, biomedical visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/textures-3d-printing/", } @bachelorsthesis{bydlinski-2015-gapaot, title = "``Gangsters and Pranksters: The Art of Thievery'': A Multiplayer Stealth Game in the Context of the Generation of Dynamic Replay Videos", author = "Lukas Bydlinski", year = "2015", abstract = "The project ``Gangsters and Pranksters: The Art of Thievery'', which I worked on in cooperation with colleague Alexander Bayer, has two goals: On the one hand, it shall work on its own as an entertaining multiplayer stealth game for up to four human players, on the other hand it was planned from scratch to be fully compatible with the ``Surveillance System'' developed by Peter Mindek for automatically generating dynamic replay videos. This thesis concentrates on the topics user interface, virtual architecture, level design, texturing and creation of an event hierarchy.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/bydlinski-2015-gapaot/", } @talk{mistelbauer-2015-abvv, title = "Advanced Blood Vessel Visualization", author = "Gabriel Mistelbauer", year = "2015", abstract = "Visualizations of vascular structures are frequently used in radiological investigations to detect and analyze vascular diseases. Thus, the analysis of blood vessels for their diagnosis and treatment are important research fields of radiology. 
Angiographic interventions, such as stenting, balloon dilatation, or bypass surgery, need to be planned with care and precision, due to their major impact on the patient’s health state. In order to optimally decide on the therapeutic procedure, specific diagnostic methods are required. They assess the health state of blood vessels and answer clinically relevant questions such as, e.g., if blood is partially or entirely hindered from flowing through a vessel by a clot or a mineral deposition on the vessel wall, such as a calcification. In this talk, I will present Curved Planar Reformation (CPR), a method that is designed for the investigation of peripheral arteries. This technique creates a curved plane along the centerlines of blood vessels, revealing their interior, or lumen. This allows radiologists to precisely judge if blood is able to flow through a vessel or if it is significantly hindered, as in the case of a stenosis. I will also introduce advanced methods, like Curved Surface Reformation (CSR), which computes the vessel lumen entirely in 3D, offering high-quality interactive visualizations of the vessels' interior. While it is possible with conventional CPR methods to examine the whole vessel lumen by rotating around the centerline of a vessel, I will present other possibilities of visualizing vascular structures, such as Curvicircular Feature Aggregation (CFA). This approach aggregates the single rotated images of CPR into a single view. By eliminating the need for rotation, blood vessels can be investigated by inspecting only one image. This method can be used as a guidance and visual analysis tool for treatment planning. As vessel visualization provides very specific insight, the overall contextual information outside the vessel lumen might deteriorate. I will present means how to overcome this by providing a spatial indication. 
Advancing to visualization of the cerebral arteries, I will present an approach how to automatically extract the Circle of Willis and assess the morphology of this arterial circle within a single visualization. Despite being an important part of many analysis pipelines, the segmentation of blood vessels commonly requires manual inspection and correction. While plenty of volume editing techniques exist, it remains cumbersome and error-prone for the user to find and select appropriate regions for editing. In this talk, I will present the application of a general volume editing technique to the domain of vascular structures and how it reduces the necessary interaction to obtain high-quality segmentations. Due to the ever growing volume of acquired data and information, users have to be constantly aware of the methods for their exploration and for interaction. Owing to this, innovations may be used inappropriately and users may become skeptical. In this talk, I will present a knowledge-assisted interface for medical visualization, which reduces the necessary effort to use new visualization methods, by providing only the most relevant ones in a smart way. Consequently, we are able to expand such a system with innovations without the users to worry about when, where, and especially how they may or should use them. I will show how to apply this concept in the context of blood vessels investigations.", event = "Invited Talk", location = "Otto-von-Guericke University Magdeburg, Germany", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/mistelbauer-2015-abvv/", } @talk{Groeller_Eduard_2015_SMV, title = "Simplifying Medical Visualization through Sparse Interaction and Reformation, MICCAI 2015 Tutorial on Advanced Medical Visualization", author = "Eduard Gr\"{o}ller", year = "2015", abstract = "The concept of curved planar reformation and showed other applications where it makes sense to reformat data sets. 
Here, he gave an example of the left ventricle of the human heart.", event = "18th International Conference on Medical Image Computing and Computer Assisted Intervention", location = "Munich, Germany", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Groeller_Eduard_2015_SMV/", } @talk{Groeller_Eduard_2015_TCU, title = "The Certainly Uncertain Uncertainty Talk", author = "Eduard Gr{\"o}ller", year = "2015", event = "Workshop on Parameter space/Uncertainty", location = "Technische Universit{\"a}t M{\"u}nchen", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Groeller_Eduard_2015_TCU/", } @studentproject{SCHAUKOWITSCH-2015-STELL, title = "Stellarium 3D Integration", author = "Florian Schaukowitsch", year = "2015", abstract = "Scenery3d is a plugin for the open-source Stellarium planetarium application allowing users to freely observe and analyze 3D architectural structures under a realistic astronomical simulation, including realistic lighting and shadowing. During this project, we ported the pre-existing codebase (from 2012) to the current version of Stellarium, which required several structural changes like a change from Qt 4 to Qt 5 and a fully shader-based renderer. We also focused on improving the rendering performance and quality, and increased usability and code maintainability. Because a big feature of Stellarium is the use of non-linear projections which require more sophisticated rendering methods, several steps have been taken to increase the subjective performance of these methods to increase interactivity. For increasing the shadow quality, percentage-closer soft shadows were implemented. Finally, the plugin was made compatible with OpenGL ES 2.0, for use on embedded platforms or desktops without a suitable OpenGL driver. 
Links: Stellarium: http://stellarium.org Launchpad (Code): https://code.launchpad.net/~stellarium-scenery3d/stellarium/scenery3d_Qt5 Direct link to user documentation: https://bazaar.launchpad.net/~stellarium-scenery3d/stellarium/scenery3d_Qt5/download/head:/pluginsscenery3ddocs-20111013124518-6rr9sbb5zt1w2ylg-2/Scenery3d.pdf ", keywords = "stellarium", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/SCHAUKOWITSCH-2015-STELL/", } @mastersthesis{wallner-2015-InfoTheoryPlay, title = "Applying Information Theory to Formal Models of Play", author = "Simon Wallner", year = "2015", abstract = "This thesis proposes a formal model of interaction in games, to be used as tool for game analysis and game testing. The model allows a quantification of interaction by looking at the low-level structure and patterns in game-controller input. The game-controller input is modelled using discrete-time, discrete-space Markov chains, and information theory is used to quantify the mismatch between the model’s prediction and the actual user input. The model uses game-agnostic game controller data as its input, which is the lowest common denominator for a large class of games (almost all game console games, most PC games). The models are trained dynamically on-the-fly for each individual play session. This allows performing individual analyses of players’ interactions, while still retaining an approach that is very general and can be used with different games without modification. To adapt to new play situations quickly, the used models are only based on data from the last couple of seconds or minutes. This can lead to the problem that not enough samples may be available to confidently estimate all dynamic model parameters. This problem is mitigated by considering the full probability distribution of each parameter instead, using a beta distribution. 
This work contributes to the understanding of interaction in games, modelling of raw user input and quantifying the model output using information theory. The described approach has been implemented in software and preliminary results from a prestudy are available. In this exploratory prestudy, the post hoc analysis of nine different games from various genres revealed a number of interaction patterns. One of the observed patterns is routinization, a process in which an action is performed repeatedly until it is executed almost unconsciously. Research in this field, based on this thesis, has been performed in cooperation with Martin Pichlmair from the IT University of Copenhagen, and a work-in-progress paper is to be published in the proceedings of the ACM SIGCHI Annual Symposium on Computer-Human Interaction in Play (CHI PLAY) [Wallner, S., Pichlmair, M., Hecher, M., and Wimmer, M. (2015). Modeling Routinization in Games - An Information Theory Approach. In Proceedings of the Second ACM SIGCHI Annual Symposium on Computer-human Interaction in Play, pages 727--732, London, UK. ACM.] ", month = nov, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", keywords = "formal models of play, user input visualization, understanding interaction in games, gameplay, information theory", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/wallner-2015-InfoTheoryPlay/", } @inproceedings{wallner-2015-ModelingRoutinization, title = "Modeling Routinization in Games: An Information Theory Approach", author = "Simon Wallner and Martin Pichlmair and Michael Hecher and Michael Wimmer", year = "2015", abstract = "Routinization is the result of practicing until an action stops being a goal-directed process. This paper formulates a definition of routinization in games based on prior research in the fields of activity theory and practice theory. 
Routinization is analyzed using the formal model of discrete-time, discrete-space Markov chains and information theory to measure the actual error between the dynamically trained models and the player interaction. Preliminary research supports the hypothesis that Markov chains can be effectively used to model routinization in games. A full study design is presented to further explore and verify this hypothesis.", isbn = "978-1-4503-3466-2", series = "CHI PLAY", publisher = "ACM", location = "London, United Kingdom", booktitle = "Proceedings of the 2015 Annual Symposium on Computer-Human Interaction in Play", pages = "727--732", keywords = "Games, Routinization, Markov Chains, Information Theory", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/wallner-2015-ModelingRoutinization/", }