@article{Ribicic_2012_SUS, title = "Sketching Uncertainty into Simulations", author = "Hrvoje Ribi\v{c}i\'{c} and J\"{u}rgen Waser and R Gurbat and Bernhard Sadransky and Eduard Gr\"{o}ller", year = "2012", abstract = "In a variety of application areas, the use of simulation steering in decision making is limited at best. Research focusing on this problem suggests that most user interfaces are too complex for the end user. Our goal is to let users create and investigate multiple, alternative scenarios without the need for special simulation expertise. To simplify the specification of parameters, we move from a traditional manipulation of numbers to a sketch-based input approach. Users steer both numeric parameters and parameters with a spatial correspondence by sketching a change onto the rendering. Special visualizations provide immediate visual feedback on how the sketches are transformed into boundary conditions of the simulation models. Since uncertainty with respect to many intertwined parameters plays an important role in planning, we also allow the user to intuitively set up complete value ranges, which are then automatically transformed into ensemble simulations. The interface and the underlying system were developed in collaboration with experts in the field of flood management. The real-world data they have provided has allowed us to construct scenarios used to evaluate the system. These were presented to a variety of flood response personnel, and their feedback is discussed in detail in the paper. The interface was found to be intuitive and relevant, although a certain amount of training might be necessary.", month = dec, journal = "IEEE Transactions on Visualization and Computer Graphics", number = "12", volume = "18", pages = "2255--2264", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Ribicic_2012_SUS/", } @article{SCHERZER-2012-TCM, title = "Temporal Coherence Methods in Real-Time Rendering", author = "Daniel Scherzer and Lei Yang and Oliver Mattausch and Diego Nehab and Pedro V. Sander and Michael Wimmer and Elmar Eisemann", year = "2012", abstract = "Nowadays, there is a strong trend towards rendering to higher-resolution displays and at high frame rates. This development aims at delivering more detail and better accuracy, but it also comes at a significant cost. Although graphics cards continue to evolve with an ever-increasing amount of computational power, the speed gain is easily counteracted by increasingly complex and sophisticated shading computations. For real-time applications, the direct consequence is that image resolution and temporal resolution are often the first candidates to bow to the performance constraints (e.g., although full HD is possible, the PS3 and Xbox often render at lower resolutions). In order to achieve high-quality rendering at a lower cost, one can exploit temporal coherence (TC). The underlying observation is that a higher resolution and frame rate do not necessarily imply a much higher workload, but a larger amount of redundancy and a higher potential for amortizing rendering over several frames. In this survey, we investigate methods that make use of this principle and provide practical and theoretical advice on how to exploit temporal coherence for performance optimization. These methods not only allow incorporating more computationally intensive shading effects into many existing applications, but also offer exciting opportunities for extending high-end graphics applications to lower-spec consumer-level hardware.
To this end, we first introduce the notion and main concepts of TC, including an overview of historical methods. We then describe a general approach, image-space reprojection, with several implementation algorithms that facilitate reusing shading information across adjacent frames. We also discuss data-reuse quality and performance related to reprojection techniques. Finally, in the second half of this survey, we demonstrate various applications that exploit TC in real-time rendering.", month = dec, issn = "1467-8659", journal = "Computer Graphics Forum", number = "8", volume = "31", pages = "2378--2408", keywords = "remote rendering, sampling, perception-based rendering, occlusion culling, non-photo-realistic rendering, level-of-detail, large data visualization, image-based rendering, global illumination, frame interpolation, anti-aliasing, shadows, streaming, temporal coherence, upsampling", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/SCHERZER-2012-TCM/", } @article{Alsallakh_2012_RCW, title = "Reinventing the Contingency Wheel: Scalable Visual Analytics of Large Categorical Data", author = "Bilal Alsallakh and W Aigner and Silvia Miksch and Eduard Gr\"{o}ller", year = "2012", abstract = "Contingency tables summarize the relations between categorical variables and arise in both scientific and business domains. Asymmetrically large two-way contingency tables pose a problem for common visualization methods. The Contingency Wheel has been recently proposed as an interactive visual method to explore and analyze such tables. However, the scalability and readability of this method are limited when dealing with large and dense tables. In this paper we present Contingency Wheel++, new visual analytics methods that overcome these major shortcomings: (1) regarding automated methods, a measure of association based on Pearson's residuals alleviates the bias of the raw residuals originally used, (2) regarding visualization methods, a frequency-based abstraction of the visual elements eliminates overlapping and makes analyzing both positive and negative associations possible, and (3) regarding the interactive exploration environment, a multi-level overview+detail interface enables exploring individual data items that are aggregated in the visualization or in the table using coordinated views. We illustrate the applicability of these new methods with a use case and show how they enable discovering and analyzing nontrivial patterns and associations in large categorical data.", month = dec, journal = "IEEE Transactions on Visualization and Computer Graphics", number = "12", volume = "18", pages = "2849--2858", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Alsallakh_2012_RCW/", } @mastersthesis{Woerister_2012_ACS, title = "A caching system for a dependency-aware scene graph", author = "Michael Woerister", year = "2012", abstract = "Scene graphs are a common way of representing 3-dimensional scenes for graphical applications. A scene is represented as a hierarchical structure of nodes which represent 3D geometry, spatial transformations, surface properties, and other, possibly application-specific, aspects. Scene graph systems can be designed to be very generic and flexible, e.g., by allowing users to implement custom node types and traversals or by providing facilities to dynamically create subgraphs during a traversal. This flexibility comes at the cost of increased time spent in pure traversal logic. Especially for CPU-bound applications this causes a performance drop.
This thesis proposes a scene graph caching system that automatically creates an alternative representation of selected subgraphs. This alternative representation constitutes a render cache in the form of a so-called instruction stream, which allows the cached subgraph to be rendered at lower CPU cost and thus more quickly than with a regular render traversal. In order to be able to update render caches incrementally in reaction to certain scene graph changes, a dependency system was developed. This system provides a model for describing and tracking changes in the scene graph and enables the scene graph caching system to update only those parts of the render cache that need to be updated. The actual performance characteristics of the scene graph caching system were investigated using a number of synthetic test scenes in different configurations. These tests showed that the caching system is most useful in scenes with a high structural complexity (high geometry count and/or deep scene graph hierarchies) and moderate primitive count per geometry.", month = dec, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Woerister_2012_ACS/", } @article{Ropinski-2012-UBT, title = "Unified Boundary-Aware Texturing for Interactive Volume Rendering", author = "Timo Ropinski and Stefan Diepenbrock and Stefan Bruckner and Klaus Hinrichs and Eduard Gr\"{o}ller", year = "2012", abstract = "In this paper, we describe a novel approach for applying texture mapping to volumetric data sets. In contrast to previous approaches, the presented technique enables a unified integration of 2D and 3D textures and thus makes it possible to emphasize material boundaries as well as volumetric regions within a volumetric data set at the same time. One key contribution of this paper is a parametrization technique for volumetric data sets, which takes into account material boundaries and volumetric regions. Using this technique, the resulting parametrizations of volumetric data sets enable texturing effects which create a higher degree of realism in volume rendered images. We evaluate the quality of the parametrization and demonstrate the usefulness of the proposed concepts by combining volumetric texturing with volumetric lighting models to generate photorealistic volume renderings. Furthermore, we show the applicability in the area of illustrative visualization.", month = nov, journal = "IEEE Transactions on Visualization and Computer Graphics", number = "11", volume = "18", pages = "1942--1955", keywords = "interactive volume rendering, volumetric texturing", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Ropinski-2012-UBT/", } @inproceedings{Csebfalvi-2012-IOM, title = "Illumination-Driven Opacity Modulation for Expressive Volume Rendering", author = "Bal\'{a}zs Cs\'{e}bfalvi and Bal\'{a}zs T\'{o}th and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2012", abstract = "Using classical volume visualization, typically a couple of isosurface layers are rendered semi-transparently to show the internal structures contained in the data. However, the opacity transfer function is often difficult to specify such that all the isosurfaces are of high contrast and sufficiently perceivable. In this paper, we propose a volume-rendering technique which ensures that the different layers contribute to fairly different regions of the image space.
Since the overlap between the affected regions is reduced, an outer translucent isosurface does not significantly decrease the contrast of a partially hidden inner isosurface. Therefore, the layers of the data become visually well separated. Traditional transfer functions assign color and opacity values to the voxels depending on the density and the gradient. In contrast, we also assign different illumination directions to different materials, and modulate the opacities view-dependently based on the surface normals and the directions of the light sources, which are fixed to the viewing angle. We will demonstrate that this model allows an expressive visualization of volumetric data.", month = nov, location = "Magdeburg, Germany", booktitle = "Proceedings of Vision, Modeling & Visualization 2012", pages = "103--109", keywords = "illustrative visualization, illumination, volume rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Csebfalvi-2012-IOM/", } @mastersthesis{Felberbauer_2012_AG, title = "Games with Purpose - Improving 3D Model and Land Cover Data using Crowdsourcing", author = "Florian Felberbauer", year = "2012", abstract = "A variety of 3D-model databases are available on the internet, but the process of finding the right models is often tiring. This is because the majority of the available models are barely annotated or of low quality. Annotations are often ambiguous, vague, or too specialized. Besides 3D-model annotations, remote sensing data can be ambiguous too. Global land cover maps like GlobCover, MODIS and GLC2000 show large differences in certain areas of the world. This lack of correct data is a problem, because such data is a basic requirement for a variety of research areas and applications. Consequently, this thesis aims at tackling both aforementioned problems. The task of recognizing and classifying images as well as 3D-models is easy for human beings to solve, but even today rather hard for computer systems. For that reason, this thesis makes use of the concepts of crowdsourcing. The quality of user annotations can be improved by collecting annotations from a variety of users and extracting those with the highest frequency. To achieve this, a game has been implemented that unifies crowdsourcing and social game mechanics. This game consists of game-rounds which lead the user through the process of annotating 3D-models as well as land cover data. Also, a drawing round has been implemented to enable the user to classify a given land cover area using a pre-defined set of categories. As crowdsourcing relies on a large number of users, the focus is on implementing a game that provides incentives for users to spend their free time on playing, while solving useful tasks. To reach as many users as possible, the game has been implemented using only HTML5 and JavaScript to circumvent limitations due to missing plugins or external players and to support all systems, including mobile devices. It is also integrated into Facebook to further enlarge the number of reachable users. The potential of the approach is demonstrated on the basis of a user study. The results show that the annotations with the highest frequency are good descriptors for the underlying 3D-models as well as for the land cover maps. None of the top annotations are incorrect for any model or map. Analyzing the user paintings also shows very good results. The majority of maps were classified correctly and even the distribution of categories over the maps is correct to a high degree.
We thus show that the combination of crowdsourcing and social games can improve land cover data and 3D-model annotations. These insights contribute to the ongoing Landspotting project, which is further explained in this thesis.", month = nov, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "social games, games with purpose", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Felberbauer_2012_AG/", } @mastersthesis{ROESSLER-2012-OGLES, title = "Rendering Interactive Maps on Mobile Devices Using Graphics Hardware", author = "Lukas R\"{o}ssler", year = "2012", abstract = "Mapping and navigation applications on mobile devices such as smart phones or tablets are increasingly popular. Modern maps are often rendered directly from vector data. Since the performance of a previous CPU-based map renderer was unsatisfactory, a hardware-accelerated map rendering prototype for mobile devices based on OpenGL ES 2.0 was created. A novel hybrid rendering architecture is introduced to combine the advantages of tile-based and true real-time rendering solutions. The architecture consists of a tile server that renders base map tile images and a client to display them. The new third component, the post-processor, draws dynamic map features such as icons and text above the tiles in real time, enabling a 3D fly-over mode. All components run inside the same process directly on the device. For the rendering of lines, an important map feature, a new rendering algorithm was developed that can draw lines of arbitrary width with one of three different line cap styles. Additionally, the line can be stippled with a user-defined pattern where each line dash is rendered with the selected cap style. Antialiasing of the line is supported with an arbitrary circularly symmetric filter kernel of user-definable radius. To accelerate icon rendering, a texture atlas is used to store the icons, and a simple but effective packing algorithm has been developed to generate the atlas online.", month = nov, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/ROESSLER-2012-OGLES/", } @article{musialski_2012_fice, title = "A Framework for Interactive Image Color Editing", author = "Przemyslaw Musialski and Ming Cui and Jieping Ye and Anshuman Razdan and Peter Wonka", year = "2012", abstract = "We propose a new method for interactive image color replacement that creates smooth and natural-looking results with minimal user interaction. Our system expects as input a source image and roughly scribbled target color values, and generates high-quality results at interactive rates. To achieve this goal we introduce an algorithm that preserves pairwise distances of the signatures in the original image and simultaneously maps the color to the user-defined target values. We propose efficient sub-sampling in order to reduce the computational load and adapt semi-supervised locally linear embedding to optimize the constraints in one objective function.
We show the application of the algorithm on typical photographs and compare the results to other color replacement methods.", month = nov, journal = "The Visual Computer", number = "11", volume = "29", pages = "1173--1186", keywords = "interactive image editing, color manipulation, image processing, recoloring, computational photography", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/musialski_2012_fice/", } @mastersthesis{Muehlbacher_2012_RRM, title = "Real-Time Rendering of Measured Materials", author = "Thomas M\"{u}hlbacher", year = "2012", abstract = "Interactive walkthroughs of virtual scenes are not only common in fictional settings such as entertainment and video games, but also a popular way of presenting novel architecture, furnishings or illumination. Due to the high performance requirements of such interactive applications, the presentable detail and quality are limited by the computational hardware. A realistic appearance of materials is one of the most crucial aspects of scene immersion during walkthroughs, and computing it at interactive frame rates is a challenging task. In this thesis an algorithm is presented that achieves the rendering of static scenes featuring view-dependent materials in real time. For walkthroughs of static scenes, all light propagation but the last view-dependent bounce can be precomputed and stored as diffuse irradiance light maps together with the scene geometry. The specular part of reflection and transmission is then computed dynamically by integrating the incident light approximately according to view and local material properties. For this purpose, the incident radiance distribution of each object is approximated by a single static environment map that is obtained by rendering the light-mapped scene as seen from the object. For large planar reflectors, a mirror rendering is performed every frame to approximate the incident light distribution instead of a static environment map. Materials are represented using a parametric model that is particularly suitable for fitting to measured reflectance data. Fitting the parameters of a compact model to material measurements provides a straightforward approach to reproducing light interactions of real-world substances on a screen. During walkthroughs, the view-dependent part of the local illumination integral is approximated by sampling the representation of incident light while weighting the samples according to the material properties. Noise-free rendering is achieved by reusing the exact same sampling pattern at all pixels of a shaded object, and by filtering the samples using MIP-maps of the incident light representation. All available samples are regularly placed within the specular lobe to achieve a uniform symmetric coverage of the most important part of the integration domain even when using very few (5-20) samples. Thus, the proposed algorithm achieves a biased but stable and convincing material appearance at real-time frame rates.
It is faster than existing randomized sampling algorithms, as fewer samples suffice to achieve a smooth and uniform coverage of specular lobes.", month = nov, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Muehlbacher_2012_RRM/", } @inproceedings{SCHWAERZLER-2012-FAS, title = "Fast Accurate Soft Shadows with Adaptive Light Source Sampling", author = "Michael Schw\"{a}rzler and Oliver Mattausch and Daniel Scherzer and Michael Wimmer", year = "2012", abstract = "Physically accurate soft shadows in 3D applications can be simulated by taking multiple samples from all over the area light source and accumulating them. Due to the unpredictability of the size of the penumbra regions, the required sampling density has to be high in order to guarantee smooth shadow transitions in all cases. Hence, several hundred shadow maps have to be evaluated in any scene configuration, making the process computationally expensive. Thus, we suggest an adaptive light source subdivision approach to select the sampling points adaptively. The main idea is to start with a few samples on the area light, evaluating their differences using hardware occlusion queries, and adding more sampling points if necessary. Our method is capable of selecting and rendering only the samples which contribute to an improved shadow quality, and hence generates shadows of quality and accuracy comparable to brute-force sampling. Even though additional calculation time is needed for the comparison step, this method saves valuable rendering time and achieves interactive to real-time frame rates in many cases where a brute-force sampling method does not.", month = nov, isbn = "978-3-905673-95-1", publisher = "Eurographics Association", location = "Magdeburg, Germany", booktitle = "Proceedings of the 17th International Workshop on Vision, Modeling, and Visualization (VMV 2012)", pages = "39--46", keywords = "soft shadows, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/SCHWAERZLER-2012-FAS/", } @inproceedings{scheiblauer-2012-chnt, title = "Graph-based Guidance in Huge Point Clouds", author = "Claus Scheiblauer and Michael Wimmer", year = "2012", abstract = "In recent years the use of remote sensing devices like laser scanners in the documentation of excavation sites or cultural heritage sites has led to huge point cloud models of these sites. These data sets may cover complete sites including galleries, corridors, halls, and open places. Orienting oneself in the point cloud becomes a challenge if one is not familiar with the layout of the site. Therefore we propose a graph-based guidance system to show tourists around the point cloud models.
The tourists can navigate interactively through the point cloud, but they are tied to a predefined 3D graph which represents the possible paths and connects the points of interest.", month = nov, isbn = "978-3-200-03281-1", location = "Vienna, Austria", booktitle = "Proceedings of the 17th International Conference on Cultural Heritage and New Technologies", keywords = "user interface, navigation, point rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/scheiblauer-2012-chnt/", } @phdthesis{amirkhanov-2012-thesis, title = "Visualization of Industrial 3DXCT Data", author = "Artem Amirkhanov", year = "2012", abstract = "Three-dimensional X-ray computed tomography (3DXCT) is a powerful technique for generating a digital 3D volumetric representation of a specimen from a series of 2D X-ray penetration images. The main advantage of 3DXCT is its ability to detect both the interior and the exterior structure of a specimen in one single scan. Having been used in medical diagnostics for a long time, 3DXCT is increasingly employed in industry as a method for nondestructive testing and quality control. One especially challenging industrial application is metrology, which has to fulfill the demands of today’s standards in industrial quality control. 3DXCT facilitates dimensional measurements of internal structures and of inaccessible parts of a component. However, the successful industrial application of 3DXCT is constrained by a set of major problems: Artifacts: Industrial 3DXCT systems face problems due to various types of artifacts. The appearance of artifacts in the 3DXCT scan data distorts its correlation to the actual evaluated industrial object and can lead to errors in measurements and false analysis results. Some types of artifacts are affected by the placement of a specimen in the scanning device. Multi-material components: Another problem occurs when multi-material components (MMCs) are inspected using industrial 3DXCT. Common industrial MMCs may contain metal parts surrounded by plastic materials. A major problem with this type of component is the presence of metal-caused streaking artifacts and distortions. They are located around metal components and significantly influence the material characterization. Furthermore, these streaking artifacts and distortions may even prevent any further analysis (especially for the plastic components). Measurement uncertainty: If metrology using 3DXCT is performed, the location of the specimen surface is estimated using the reconstructed 3D volume data. As opposed to mechanical or optical measurement techniques, the surface is not explicit and has a particular positional uncertainty depending on the artifacts and noise in the scan data and the surface extraction algorithm. Conventional CT metrology software does not account for the uncertainty of the data. This thesis is devoted to the development of techniques overcoming the aforementioned problems of common industrial tasks involving the usage of 3DXCT for nondestructive testing and quality control, with a main focus on industrial 3DXCT metrology.
Several novel contributions utilizing visualization techniques and visual analysis methods were implemented in integrated tools assisting typical industrial 3DXCT tasks during different stages of the data pipeline.", month = nov, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/amirkhanov-2012-thesis/", } @bachelorsthesis{celarek_adam-2012-rrmro, title = "Merging Ray Tracing and Rasterization in Mixed Reality", author = "Adam Celarek", year = "2012", abstract = "In mixed reality, virtual objects are inserted into a video stream of a real environment. This technique can be used for many applications including marketing, simulations and cultural heritage. Therefore it is important that the images look plausible. Many applications also have real-time constraints. With traditional rasterization it is difficult to create realistic reflections and refractions. In ray tracing, on the other hand, this is a trivial task, but rendering is slow. The solution described in this work uses the graphics card to speed up ray tracing. Additionally it employs a rasterizer for diffuse surfaces and only traces rays if a reflective or refractive surface is visible. This works by first creating a ray tracing mask using the fast rasterizer. The mask is true for reflective or refractive surfaces and false otherwise. Then all diffuse objects are drawn using the rasterizer. Finally, rays are traced for each pixel which is marked as a reflective or refractive surface by the ray tracing mask. These rays produce secondary rays which can eventually hit a diffuse surface; in this case the ray tracer takes over the shading. Results show that our hybrid rendering method allows high-quality reflections and refractions while still achieving interactive frame rates in mixed reality scenarios.", month = nov, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Refraction, OptiX, Augmented Reality, Reflection", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/celarek_adam-2012-rrmro/", } @article{knecht_martin_2012_RSMR, title = "Reciprocal Shading for Mixed Reality", author = "Martin Knecht and Christoph Traxler and Oliver Mattausch and Michael Wimmer", year = "2012", abstract = "In this paper we present a novel plausible rendering method for mixed reality systems, which is useful for many real-life application scenarios, like architecture, product visualization or edutainment. To allow virtual objects to seamlessly blend into the real environment, the real lighting conditions and the mutual illumination effects between real and virtual objects must be considered, while maintaining interactive frame rates. The most important such effects are indirect illumination and shadows cast between real and virtual objects. Our approach combines Instant Radiosity and Differential Rendering. In contrast to some previous solutions, we only need to render the scene once in order to find the mutual effects of virtual and real scenes. In addition, we avoid artifacts like double shadows or inconsistent color bleeding which appear in previous work. The dynamic real illumination is derived from the image stream of a fish-eye lens camera.
The scene is illuminated by virtual point lights, which use imperfect shadow maps to calculate visibility. A sufficiently fast scene reconstruction is done at run-time with Microsoft's Kinect sensor. Thus, a time-consuming manual pre-modeling step of the real scene is not necessary. Our results show that the presented method greatly improves the illusion in mixed-reality applications and significantly diminishes the artificial look of virtual objects superimposed onto real scenes.", month = nov, issn = "0097-8493", journal = "Computers & Graphics", number = "7", volume = "36", pages = "846--856", keywords = "Differential rendering, Reconstruction, Instant radiosity, Microsoft Kinect, Real-time global illumination, Mixed reality", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/knecht_martin_2012_RSMR/", } @WorkshopTalk{VisWeek-Tutorial-2012-Uncertainty, title = "IEEE VisWeek 2012 Tutorial on Uncertainty and Parameter Space Analysis in Visualization", author = "Christoph Heinzl and Stefan Bruckner and Eduard Gr\"{o}ller and Alex Pang and Hans-Christian Hege and Kristin Potter and R\"{u}diger Westermann and Tobias Pfaffelmoser and Torsten M\"{o}ller", year = "2012", abstract = "Within the past decades visualization has advanced to a powerful means of exploring and analyzing data. Recent developments in both hard- and software contributed to previously unthinkable evaluations and visualizations of data with strongly increasing sizes and levels of complexity. Merely providing insight into the available data of a problem no longer seems sufficient: Uncertainty and parameter space analyses in visualization are becoming more prevalent and may be found in astronomical, (bio-)medical, industrial, and engineering applications. The major goal is to find out how much uncertainty is introduced at each stage of the pipeline, from data acquisition to the final rendering of the output image, and consequently how the desired result (e.g., a dimensional measurement feature) is affected. Domain specialists therefore require effective methods and techniques which help them to understand how data is generated, how reliable the generated data is, and where and why data is uncertain. Furthermore, as the problems under investigation become increasingly complex, finding suitable algorithms that provide the desired solution also becomes more difficult. Additional questions may arise, e.g., how a slight parameter change modifies the result, how stable a parameter is, in which range a parameter is stable, or which parameter set is optimal for a specific problem. Metaphorically speaking, an algorithm for solving a problem may be seen as finding a path through some rugged terrain (the core problem) ranging from the high grounds of theory to the haunted swamps of heuristics. There are many different paths through this terrain with different levels of comfort, length, and stability. Finding all possible paths corresponds in our case to doing an analysis of all possible parameters of a problem-solving algorithm, which yields a typically multi-dimensional parameter space. This parameter space allows for an analysis of the quality and stability of a specific parameter set. Many conventional visualization approaches neglect the issues of uncertainty and parameter space analysis. For a long time, uncertainty, if visualized at all, used to be depicted as blurred data.
In most cases, however, the uncertainty in the base data is not considered at all and just the quantities of interest are calculated. Even to calculate these quantities of interest, an empirically found parameter set is too often used to parameterize the underlying algorithms, without exploring its sensitivity to changes and without exploring the whole parameter space to find the global or a local optimum. This tutorial aims to open minds and to look at our data and the parameter sets of our algorithms with a healthy skepticism. In the tutorial we combine uncertainty visualization and parameter space analyses, which we believe is essential for the acceptance and applicability of future algorithms and techniques. The tutorial provides six sessions, starting with an overview of uncertainty visualization including a historical perspective, uncertainty modeling and statistical visualization. The second part of the tutorial will be dedicated to structural uncertainty, parameter space analysis, industrial applications of uncertainty visualization and an outlook in this domain.", month = oct, event = "IEEE VisWeek", location = "Seattle, WA, USA", keywords = "uncertainty visualization, parameter space analysis", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/VisWeek-Tutorial-2012-Uncertainty/", } @mastersthesis{Pajer_2012_VoM, title = "Visualization Of Multivariate Networks", author = "Stephan Pajer", year = "2012", abstract = "Multivariate networks are graphs, consisting of a set of nodes and a set of connections between these nodes, that have additional data dimensions for each node and/or connection. Such multivariate graphs are prevalent in a number of different fields, including biological systems and social sciences. In this thesis, an existing linked-view system for analyzing multivariate data has been extended for the analysis of networks. A node-link view has been implemented to give an overview of the graph. To leverage existing visualizations, additional data about the structure of the network can be added to the nodes and analyzed with views designed for unconnected multivariate data. Another contribution is a novel visualization that supports the study of queries concerning relationships between different groups of nodes in a network.", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Pajer_2012_VoM/", } @mastersthesis{hecher-2012-MH, title = "A Comparative Perceptual Study of Soft Shadow Algorithms", author = "Michael Hecher", year = "2012", abstract = "While a huge body of soft shadow algorithms has been proposed, there has been no methodical study comparing different real-time shadowing algorithms with respect to their plausibility and visual appearance. Therefore, a study was designed to identify and evaluate scene properties with respect to their relevance to shadow quality perception. Since there are so many factors that might influence the perception of soft shadows (e.g., complexity of objects, movement, and textures), the study was designed and executed in a way on which future work can build. The evaluation concept not only captures the predominant case of an untrained user experiencing shadows without comparing them to a reference solution, but also the cases of trained and experienced users. We achieve this by reusing the knowledge users gain during the study.
Moreover, we thought that the common approach of a two-option forced-choice study can be frustrating for participants when both choices are so similar that people think they are the same. To tackle this problem, a neutral option was provided. For time-consuming studies, where frustrated participants tend to make arbitrary choices, this is a useful concept. Speaking with participants after the study and evaluating the results supports our choice of a third option. The results are helpful to guide the design of future shadow algorithms and allow researchers to evaluate algorithms more effectively. They also allow developers to make better performance versus quality decisions for their applications. One important result of this study is that we can scientifically verify that, without comparison to a reference solution, human perception is relatively indifferent to the correctness of a soft shadow. Hence, a simple but robust soft shadow algorithm is the better choice in real-world situations. Another finding is that approximating contact hardening in soft shadows is sufficient for the average user and not significantly worse for experts.", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Perception Studies, Soft Shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/hecher-2012-MH/", } @mastersthesis{Stabel_2012_UDM, title = "User-driven Manipulation of Geospatial Data", author = "Florian Stabel", year = "2012", abstract = "A WebGIS is an application designed to store, analyze, manage and present geographical data. It provides tools which support users in exploring and understanding the underlying data. Depending on the data source (raster or vector data), these tools vary. Many more styling options exist for vector data than for raster data. This is because the position information in vector data allows the system to apply a wider range of styling features. This thesis is about user-driven manipulation of geospatial data and is part of the ESA DUE Permafrost project. It should help users to navigate through remotely sensed data and to explore relevant features of it. It therefore introduces methods to change the visualization of the data within a WebGIS by assigning different style profiles at runtime. The reference implementation is based on a specific software arrangement. The data is managed by GeoServer, an open source software server with the capability to edit and share geospatial data. To get a visual representation, GeoServer applies a Styled Layer Descriptor (SLD) to the data. An SLD is an XML-based language and describes the appearance of associated layers. Modifying the SLD makes it possible to influence the layer appearance. Since the data of the ESA DUE Permafrost project consists of raster data only, this work concentrates on tools for styling raster data. As a result of this work, an interactive legend is introduced. It is a user interface that manipulates the SLD at runtime according to the user settings. This gives the user the capability to highlight areas of interest based on the underlying data.
The user interface also acts as a color legend for the displayed data.", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Stabel_2012_UDM/", } @bachelorsthesis{MAESTRI-2012-MRW, title = "SPEX: Visualisieren von 3D-Brillenmodellen im Browser mittels WebGL", author = "Luca Maestri", year = "2012", abstract = "This thesis documents the online part of the Spex application, a project that emerged from a collaboration between JFPartners and the Vienna University of Technology. Spex is intended to make it possible to render virtual 3-dimensional glasses models onto photos of customers. This allows customers to try on glasses models from online catalogs without the vendor having to keep every pair of glasses in stock. The technology used to render the 3-dimensional scene in the browser is WebGL, a new standard by the Khronos Group that gives the browser access to the graphics card. The advantages of WebGL are that it is now supported by most browsers and that the entire computation of the scene runs on the client's graphics card. This removes load from the server.", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "WebGL", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/MAESTRI-2012-MRW/", } @inproceedings{mistelbauer-2012-ssv, title = "Smart Super Views - A Knowledge-Assisted Interface for Medical Visualization", author = "Gabriel Mistelbauer and Hamed Bouzari and R\"{u}diger Schernthaner and Ivan Baclija and Arnold K\"{o}chl and Stefan Bruckner and Milo\v{s} \v{S}r\'{a}mek and Eduard Gr\"{o}ller", year = "2012", abstract = "Due to the ever growing volume of acquired data and information, users have to be constantly aware of the methods for their exploration and for interaction. Not every method might be applicable to the data at hand or reveal the desired result. Owing to this, innovations may be used inappropriately and users may become skeptical. In this paper we propose a knowledge-assisted interface for medical visualization, which reduces the effort necessary to use new visualization methods by providing only the most relevant ones in a smart way. Consequently, we are able to expand such a system with innovations without users having to worry about when, where, and especially how they may or should use them. We present an application of our system in the medical domain and give qualitative feedback from domain experts.", month = oct, publisher = "IEEE Computer Society", location = "Seattle, WA, USA", booktitle = "IEEE Conference on Visual Analytics Science and Technology (IEEE VAST) 2012", pages = "163--172", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/mistelbauer-2012-ssv/", } @bachelorsthesis{spelitz_stefan-2012-CDTFMR, title = "Color Distribution Transfer For Mixed-Reality Applications", author = "Stefan Spelitz", year = "2012", abstract = "In mixed-reality environments it is essential to integrate virtual objects seamlessly into a real scene. Virtual objects should have similar appearances to those of real objects captured by a video camera.
This is useful for many real-life application scenarios, including product advertising and visualization, edutainment systems or for enhancing cultural heritage sites. A typical problem in this domain is to match the current ‘color mood’ of the video camera scene with the colors of virtual (rendered) objects. The color mood depends on the global illumination conditions as well as the hue, saturation or white balance settings of the camera. The aim of this paper is to integrate existing methods of histogram transfer used in the domain of computational photography into mixed-reality environments. These methods allow us to simulate the current luminance conditions in the scene as well as changes in the camera driver settings, and to apply them to virtual objects. This thesis contains two fast-running approaches to provide a color mapping between virtual objects and the real scene, which can be used in real-time applications. The results show that these methods increase the immersion of virtual objects in a real scene.", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Augmented Reality, Color Transfer", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/spelitz_stefan-2012-CDTFMR/", } @phdthesis{varchola_andrej-2012-fetoscopic, title = "Live Fetoscopic Visualization of 4D Ultrasound Data", author = "Andrej Varchola", year = "2012", abstract = "Ultrasound (US) imaging is, due to its real-time character, low cost, non-invasive nature, high availability, and many other factors, considered a standard diagnostic procedure during pregnancy. The quality of diagnostics depends on many factors, including the scanning protocol, data characteristics and visualization algorithms. In this work, several problems of ultrasound data visualization for obstetric ultrasound imaging are discussed and addressed. The capability of ultrasound scanners is growing and modern ultrasound devices produce large amounts of data that have to be processed in real time. An ultrasound imaging system is in a broad sense a pipeline of several operations and visualization algorithms. Individual algorithms are usually organized in modules that separately process the data. In order to achieve the required level of detail and high-quality images with the visualization pipeline, we had to address the flow of large amounts of data on modern computer hardware with limited capacity. We developed a novel visualization-pipeline architecture for ultrasound imaging. This visualization pipeline combines several algorithms, which are described in this work, into an integrated system. In the context of this pipeline, we advocate slice-based streaming as a possible approach for the large data flow problem. Live examination of the moving fetus from ultrasound data is a challenging task which requires extensive knowledge of the fetal anatomy and proficient operation of the ultrasound machine. The fetus is typically occluded by structures which hamper the view in 3D rendered images. We developed a novel method of visualizing the human fetus for prenatal sonography from 3D/4D ultrasound data. It is a fully automatic method that can recognize and render the fetus without occlusion, where the highest priority is to achieve an unobstructed view of the fetal face. Our smart visibility method for prenatal ultrasound is based on a ray-analysis performed within image-based direct volume rendering (DVR).
It automatically calculates a clipping surface that removes the uninteresting structures and uncovers the interesting structures of the fetal anatomy behind them. The method is able to work with the data streamed on-the-fly from the ultrasound transducer and to visualize a temporal sequence of reconstructed ultrasound data in real time. It has the potential to minimize the interaction of the operator and to improve the comfort of patients by decreasing the investigation time. This can lead to an increased confidence in the prenatal diagnosis with 3D ultrasound and eventually decrease the costs of the investigation. Ultrasound scanning is very popular among parents who are interested in the health condition of their fetus during pregnancy. Parents usually want to keep the ultrasound images as a memory for the future. Furthermore, convincing images are important for the confident communication of findings between clinicians and parents. Current ultrasound devices offer advanced imaging capabilities, but common visualization methods for volumetric data only provide limited visual fidelity. The standard methods render only images with a plastic-like appearance which do not correspond to natural-looking fetuses. This is partly due to the dynamic and noisy nature of the data, which limits the applicability of standard volume visualization techniques. In this thesis, we present a fetoscopic rendering method which aims to reproduce the quality of fetoscopic examinations (i.e., physical endoscopy of the uterus) from 4D sonography data. Based on the requirements of domain experts and the constraints of live ultrasound imaging, we developed a method for high-quality rendering of prenatal examinations. We employ a realistic illumination model which supports shadows, movable light sources, and realistic rendering of the human skin to provide an immersive experience for physicians and parents alike. Beyond aesthetic aspects, the resulting visualizations also have promising diagnostic applications. The presented fetoscopic rendering method has been successfully integrated into the state-of-the-art ultrasound imaging systems of GE Healthcare as the HDlive imaging tool. It is used daily in many prenatal imaging centers around the world.", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "ultrasound, volume rendering, medical imaging", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/varchola_andrej-2012-fetoscopic/", } @inproceedings{Morar_2012_ISB, title = "Image Segmentation Based on Active Contours without Edges", author = "Anca Morar and Florica Moldoveanu and Eduard Gr\"{o}ller", year = "2012", abstract = "There are a lot of image segmentation techniques that try to differentiate between background and object pixels, but many of them fail to discriminate between different objects that are close to each other. Some image characteristics, like low contrast between background and foreground or inhomogeneity within the objects, increase the difficulty of correctly segmenting images. We designed a new segmentation algorithm based on active contours without edges. We also used other image processing techniques, such as nonlinear anisotropic diffusion and adaptive thresholding, in order to overcome the image problems stated above.
Our algorithm was tested on very noisy images, and the results were compared to those obtained with known methods, like segmentation using active contours without edges and graph cuts. The new technique led to very good results, but the time complexity was a drawback. However, this drawback was significantly reduced with the use of graphical programming. Our segmentation method has been successfully integrated into a software application whose aim is to segment the bones from CT datasets, extract the femur and produce personalized prostheses in hip arthroplasty.", month = aug, location = "Cluj-Napoca, Romania", event = "8th IEEE International Conference on Intelligent Computer Communication and Processing 2012", booktitle = "IEEE ICCP 2012 - Proceedings", pages = "213--220", keywords = "Active contours without edges, image segmentation, nonlinear anisotropic diffusion, parallel image processing", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Morar_2012_ISB/", } @bachelorsthesis{BORZA-2012-Stellarium, title = "Stellarium - Effects", author = "Andrei Borza", year = "2012", abstract = "For this thesis we focused on improving Scenery3d, a plugin for the popular open source software Stellarium. Scenery3d was developed by students of the Vienna University of Technology to enable walking through models of architectural structures, which were built with astronomical orientation in mind.", month = aug, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/BORZA-2012-Stellarium/", } @misc{Auzinger_2012_GeigerCam, title = "GeigerCam: Measuring Radioactivity with Webcams", author = "Thomas Auzinger and Ralf Habel and Andreas Musilek and Dieter Hainz and Michael Wimmer", year = "2012", abstract = "Measuring radioactivity is almost exclusively a professional task in the realms of science, industry and defense, but recent events spur the interest in low-cost consumer detection devices. We show that by using image processing techniques, a current, only slightly modified, off-the-shelf HD webcam can be used to measure alpha, beta, and gamma radiation. In contrast to dedicated measurement devices such as Geiger counters, our framework can classify the type of radiation and can differentiate between various kinds of radioactive materials. By optically insulating the camera's imaging sensor, recordings at extreme exposure and gain values are possible, and even the partly very faint signals become detectable. The camera is set to the longest exposure time possible and to a very high gain to detect even faint signals. During measurements, GPU-assisted real-time image processing of the direct video feed is used to treat the remaining noise by tracking the noise spectrum per pixel, incorporating not only spatial but also temporal variations due to temperature changes and spontaneous emissions. A confidence value per pixel based on event probabilities is calculated to identify potentially hit pixels. Finally, we use morphological clustering to group pixels into particle impact events and analyze their energies.
Our approach results in a simple device that can be operated on any computer and costs only $20--30, an order of magnitude cheaper than entry-level nuclear radiation detectors.", month = aug, publisher = "ACM", location = "Los Angeles, CA", address = "New York, NY, USA", isbn = "978-1-4503-1682-8", event = "ACM SIGGRAPH 2012", editor = "Dan Wexler", booktitle = "ACM SIGGRAPH 2012 Posters", Conference date = "Poster presented at ACM SIGGRAPH 2012 (2012-08-05--2012-08-09)", pages = "40:1--40:1", keywords = "radioactivity, webcam, measurement", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Auzinger_2012_GeigerCam/", } @inproceedings{Eibner-12, title = "GPU-based Multi-Resolution Image Analysis for Synthesis of Tileable Textures", author = "Gottfried Eibner and Anton Fuhrmann and Werner Purgathofer", year = "2012", abstract = "We propose a GPU-based algorithm for texture analysis and synthesis of nearly-regular patterns, in our case scanned textiles or similar manufactured surfaces. The method takes advantage of the highly parallel execution on the GPU to generate correlation maps from captured template images. In an analysis step, a lattice encoding the periodicity of the texture is computed. This lattice is used to synthesize the smallest texture tile describing the underlying pattern. Compared to other approaches, our method analyzes and synthesizes a valid lattice model without any user interaction. It is robust against small distortions and fast compared to other, more general approaches.", month = jul, isbn = "978-3-642-33190-9", series = "Lecture Notes in Computer Science (LNCS) series", publisher = "Springer-Verlag", organization = "Symposium proceedings", location = "Crete, Greece", issn = "0302-9743", booktitle = "Proceedings Intern. Symposium on Visual Computing (ISVC 2012)", pages = "479--488", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Eibner-12/", } @phdthesis{ohrhallinger-stefan-2012-the, title = "The Intrinsic Shape of Point Clouds", author = "Stefan Ohrhallinger", year = "2012", abstract = "Given a point cloud, in the form of unorganized points, the problem of automatically connecting the dots to obtain an aesthetically pleasing and piecewise-linear closed interpolating boundary shape has been extensively researched for over three decades. In R3, it is even more complicated to find an aesthetic closed oriented surface. Most previous methods for shape reconstruction exclusively from coordinates work well only when the point spacing on the shape boundary is dense and locally uniform. The problem of shape construction from non-dense and locally non-uniformly spaced point sets is in our opinion not yet satisfactorily solved. Various extensions to earlier methods do not work that well and do not provide any performance guarantees either. Our main thesis in this research is that a point set, even with non-dense and locally non-uniform spacing, has an intrinsic shape which optimizes in some way the Gestalt principles of form perception. This shape can be formally defined as the minimum of an energy function over all possible closed piecewise-linear interpolations of this point set. Further, while finding this optimal shape is NP-hard, it is possible to heuristically search for an acceptable approximation within reasonable time. Our minimization objective is guided by Gestalt’s laws of Proximity, Good Continuity and Closure. Minimizing curvature tends to satisfy proximity and good continuity.
For computational simplification, we globally minimize the longest-edge-in-simplex, since it is intrinsic to a single facet and also a factor in mean curvature. We also require a closed shape. Using such an intrinsic criterion permits the extraction of an approximate shape with a linearithmic algorithm as a simplicial complex, which we have named the Minimum Boundary Complex. Experiments show that it seems to be a very close approximation to the desired boundary shape and that it retains its genus. Further, it can be constructed locally and can also handle sensor data with significant noise. Its quick construction is due to not being restricted by the manifold property, which is required in the boundary shape. Therefore it has many applications where a manifold shape is not necessary, e.g. visualization, shape retrieval, shadow mapping, and topological data analysis in higher dimensions. The definition of the Minimum Boundary Complex is our first major contribution. Our next two contributions include new methods for constructing boundary shapes by transforming the boundary complex into a close approximation of the minimum boundary shape. These algorithms vary a topological constraint to first inflate the boundary complex to recover a manifold hull and then sculpt it to extract a Minimum Boundary approximation, which interpolates all the points. In the R3 method, we show how local minima can be avoided by covering holes in the hull. Finally, we apply a mesh fairing step to optimize mean curvature directly. We present results for shape construction in R2 and R3, which clearly demonstrate that our methods work better than the best performing earlier methods for non-dense and locally non-uniformly spaced point sets, while maintaining competitive linearithmic complexity.", month = jul, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Surface Reconstruction, Manifold Reconstruction, Point Cloud, Shape Boundary, Gestalt, Curve Reconstruction", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/ohrhallinger-stefan-2012-the/", } @article{PMI_AR_2012, title = "Porosity Maps – Interactive Exploration and Visual Analysis of Porosity in Carbon Fiber Reinforced Polymers", author = "Andreas Reh and B Plank and J Kastner and Eduard Gr\"{o}ller and Christoph Heinzl", year = "2012", abstract = "In this work a novel method for the characterization of porosity in carbon fiber reinforced polymers (CFRP) is presented. A visualization pipeline for the interactive exploration and visual analysis of CFRP specimens is developed to enhance the evaluation workflow for non-destructive testing (NDT) practitioners based on specified tasks. Besides quantitative porosity determination and the calculation of local pore properties, i.e., volume, surface, dimensions and shape factors, we employ a drill-down approach to explore pores in a CFRP specimen. We introduce Porosity Maps (PM) to allow for a fast porosity evaluation of the specimen. Pores are filtered in two stages. First, a region of interest is selected in the porosity maps. Second, pores are filtered with parallel coordinates according to their local properties. Furthermore, a histogram-based best-viewpoint widget was implemented to visualize the quality of viewpoints on a sphere. The advantages of our approach are demonstrated using real-world CFRP specimens.
We are able to show that our visualization-driven approach leads to a better evaluation of CFRP components than existing reference methods.", month = jun, journal = "Computer Graphics Forum", volume = "31", number = "3", pages = "1185--1194", keywords = "Interaction Techniques, Methodology and techniques", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/PMI_AR_2012/", } @article{schedl-2012-dof, title = "A layered depth-of-field method for solving partial occlusion", author = "David Schedl and Michael Wimmer", year = "2012", abstract = "Depth of field (DoF) represents a distance range around a focal plane, where objects in an image are crisp. DoF is one of the effects that significantly contribute to the photorealism of images and therefore is often simulated in rendered images. Various methods for simulating DoF have been proposed so far, but few tackle the issue of partial occlusion: blurry objects near the camera are semi-transparent and result in partially visible background objects. This effect is strongly apparent in miniature and macro photography. In this work a DoF method is presented which simulates partial occlusion. The contribution of this work is a layered method where the scene is rendered into layers. Blurring is done efficiently with recursive Gaussian filters. Due to the use of Gaussian filters, large artifact-free blur radii can be simulated at reasonable cost.", month = jun, journal = "Journal of WSCG", volume = "20", number = "3", issn = "1213-6972", pages = "239--246", keywords = "realtime, rendering, depth-of-field, layers, depth peeling", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/schedl-2012-dof/", } @article{Birkeland-2012-IMC, title = "Illustrative Membrane Clipping", author = "{\AA}smund Birkeland and Stefan Bruckner and Andrea Brambilla and Ivan Viola", year = "2012", abstract = "Clipping is a fast, common technique for resolving occlusions. It only requires simple interaction, is easily understandable, and thus has been very popular for volume exploration. However, a drawback of clipping is that the technique indiscriminately cuts through features. Illustrators, for example, consider the structures in the vicinity of the cut when visualizing complex spatial data and make sure that smaller structures near the clipping plane are kept in the image and not cut into fragments. In this paper we present a new technique, which combines the simple clipping interaction with automated selective feature preservation using an elastic membrane. In order to prevent cutting objects near the clipping plane, the deformable membrane uses underlying data properties to adjust itself to salient structures. To achieve this behaviour, we translate data attributes into a potential field which acts on the membrane, thus moving the problem of deformation into the soft-body dynamics domain. This allows us to exploit existing GPU-based physics libraries which achieve interactive frame rates. For manual adjustment, the user can insert additional potential fields, as well as pin the membrane to interesting areas.
We demonstrate that our method can act as a flexible and non-invasive replacement for traditional clipping planes.", month = jun, journal = "Computer Graphics Forum", volume = "31", number = "3", note = "presented at EuroVis 2012", pages = "905--914", keywords = "illustrative visualization, volume rendering, clipping", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Birkeland-2012-IMC/", } @article{MATTAUSCH-2012-TIS, title = "Tessellation-Independent Smooth Shadow Boundaries", author = "Oliver Mattausch and Daniel Scherzer and Michael Wimmer and Takeo Igarashi", year = "2012", abstract = "We propose an efficient and light-weight solution for rendering smooth shadow boundaries that do not reveal the tessellation of the shadow-casting geometry. Our algorithm reconstructs the smooth contours of the underlying mesh and then extrudes shadow volumes from the smooth silhouettes to render the shadows. For this purpose we propose an improved silhouette reconstruction using the vertex normals of the underlying smooth mesh. Then our method subdivides the silhouette loops until the contours are sufficiently smooth and project to smooth shadow boundaries. This approach decouples the shadow smoothness from the tessellation of the geometry and can be used to maintain equally high shadow quality for multiple LOD levels. It causes only a minimal change to the fill rate, which is the well-known bottleneck of shadow volumes, and hence has only a small overhead.", month = jun, journal = "Computer Graphics Forum", volume = "31", number = "4", issn = "1467-8659", pages = "1465--1470", keywords = "real-time rendering, shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/MATTAUSCH-2012-TIS/", } @article{Herghelegiu-2012-BPV, title = "Biopsy Planner - Visual Analysis for Needle Pathway Planning in Deep Seated Brain Tumor Biopsy", author = "Paul Herghelegiu and Vasile Manta and Radu Perin and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2012", abstract = "Biopsies involve taking samples from living tissue using a biopsy needle. In current clinical practice they are a first mandatory step before any further medical actions are planned. Performing a biopsy on a deep seated brain tumor requires considerable time for establishing and validating the desired biopsy needle pathway to avoid damage. In this paper, we present a system for the visualization, analysis, and validation of biopsy needle pathways. Our system uses a multi-level approach for identifying stable needle placements which minimize the risk of hitting blood vessels. This is one of the major dangers in this type of intervention. Our approach helps in identifying and visualizing the point on the pathway that is closest to a surrounding blood vessel, requiring a closer inspection by the neurosurgeon. An evaluation by medical experts is performed to demonstrate the utility of our system.", month = jun, journal = "Computer Graphics Forum", volume = "31", number = "3", note = "presented at EuroVis 2012", pages = "1085--1094", keywords = "biopsy planning, medical visualization, visual analysis", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Herghelegiu-2012-BPV/", } @article{knecht_martin_2012_BRDFEstimation, title = "Interactive BRDF Estimation for Mixed-Reality Applications", author = "Martin Knecht and Georg Tanzmeister and Christoph Traxler and Michael Wimmer", year = "2012", abstract = "Recent methods in augmented reality allow simulating mutual light interactions between real and virtual objects.
These methods are able to embed virtual objects in a more sophisticated way than previous approaches. However, their main drawback is that they need a virtual representation of the real scene to be augmented in the form of geometry and material properties. In the past, this representation had to be modeled in advance, which is very time-consuming and only allows for static scenes. We propose a method that reconstructs the surrounding environment and estimates its Bidirectional Reflectance Distribution Function (BRDF) properties at runtime without any preprocessing. By using the Microsoft Kinect sensor and an optimized hybrid CPU & GPU-based BRDF estimation method, we are able to achieve interactive frame rates. The proposed method was integrated into a differential instant radiosity rendering system to demonstrate its feasibility.", month = jun, journal = "Journal of WSCG", volume = "20", number = "1", issn = "1213-6972", pages = "47--56", keywords = "Augmented Reality, BRDF Estimation, Reconstruction", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/knecht_martin_2012_BRDFEstimation/", } @inproceedings{fink-2012-cg1, title = "Teaching a Modern Graphics Pipeline Using a Shader-based Software Renderer", author = "Heinrich Fink and Thomas Weber and Michael Wimmer", year = "2012", abstract = "Shaders are a fundamental pattern of the modern graphics pipeline. This paper presents a syllabus for an introductory computer graphics course that emphasizes the use of programmable shaders while teaching raster-level algorithms at the same time. We describe a Java-based framework that is used for programming assignments in this course. This framework implements a shader-enabled software renderer and an interactive 3D editor. We also show how to create attractive course materials by using COLLADA, an open standard for 3D content exchange.", month = may, publisher = "Eurographics Association", location = "Cagliari, Italy", issn = "1017-4656", event = "Eurographics 2012", editor = "Giovanni Gallo and Beatriz Sousa Santos", booktitle = "Eurographics 2012 -- Education Papers", pages = "73--80", keywords = "Education, Collada, Java, Introductory Computer Graphics, Software Rasterizer", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/fink-2012-cg1/", } @article{Habel_2012_PSP, title = "Practical Spectral Photography", author = "Ralf Habel and Michael Kudenov and Michael Wimmer", year = "2012", abstract = "We introduce a low-cost and compact spectral imaging camera design based on unmodified consumer cameras and a custom camera objective. The device can be used in a high-resolution configuration that measures the spectrum of a column of an imaged scene with up to 0.8 nm spectral resolution, rivalling commercial non-imaging spectrometers, and a mid-resolution hyperspectral mode that allows the spectral measurement of a whole image, with up to 5 nm spectral resolution and 120x120 spatial resolution. We develop the necessary calibration methods based on halogen/fluorescent lamps and laser pointers to acquire all necessary information about the optical system. We also derive the mathematical methods to interpret and reconstruct spectra directly from the Bayer array images of a standard RGGB camera.
This objective design introduces accurate spectral remote sensing to computational photography, with numerous applications in color theory, colorimetry, vision and rendering, making the acquisition of a spectral image as simple as taking a high-dynamic-range image.", month = may, journal = "Computer Graphics Forum (Proceedings EUROGRAPHICS 2012)", volume = "31", number = "2", pages = "449--458", keywords = "Computational Photography, Spectroscopy, Computed Tomography Imaging Spectrometer, Practical", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Habel_2012_PSP/", } @article{Peter_2012_AIV, title = "Semantics by Analogy for Illustrative Volume Visualization", author = "Moritz Gerl and Peter Rautek and Tobias Isenberg and Eduard Gr\"{o}ller", year = "2012", abstract = "We present an interactive graphical approach for the explicit specification of semantics for volume visualization. This explicit and graphical specification of semantics for volumetric features allows us to visually assign meaning to both input and output parameters of the visualization mapping. This is in contrast to the implicit way of specifying semantics using transfer functions. In particular, we demonstrate how to realize a dynamic specification of semantics which allows the flexible exploration of a wide range of mappings. Our approach is based on three concepts. First, we use semantic shader augmentation to automatically add rule-based rendering functionality to static visualization mappings in a shader program, while preserving the visual abstraction that the initial shader encodes. With this technique we extend recent developments that define a mapping between data attributes and visual attributes with rules, which are evaluated using fuzzy logic. Second, we let users define the semantics by analogy through brushing on renderings of the data attributes of interest. Third, the rules are specified graphically in an interface that provides visual clues for potential modifications. Together, the presented methods offer a high degree of freedom in the specification and exploration of rule-based mappings and avoid the limitations of a linguistic rule formulation.", month = may, journal = "Computers & Graphics", number = "3", volume = "36", pages = "201--213", keywords = "shader augmentation, semantic visualization mapping, illustrative visualization, Volume rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Peter_2012_AIV/", } @inproceedings{preiner_2012_AS, title = "Auto Splats: Dynamic Point Cloud Visualization on the GPU", author = "Reinhold Preiner and Stefan Jeschke and Michael Wimmer", year = "2012", abstract = "Capturing real-world objects with laser-scanning technology has become an everyday task. Recently, the acquisition of dynamic scenes at interactive frame rates has become feasible. A high-quality visualization of the resulting point cloud stream would require a per-frame reconstruction of object surfaces. Unfortunately, reconstruction computations are still too time-consuming to be applied interactively. In this paper we present a local surface reconstruction and visualization technique that provides interactive feedback for reasonably sized point clouds, while achieving high image quality. Our method is performed entirely on the GPU and in screen space, exploiting the efficiency of the common rasterization pipeline. The approach is very general, as no assumption is made about point connectivity or sampling density.
This naturally allows combining the outputs of multiple scanners in a single visualization, which is useful for many virtual and augmented reality applications.", month = may, isbn = "978-3-905674-35-4", organization = "Eurographics Association 2012", location = "Cagliari", editor = "H. Childs and T. Kuhlen", booktitle = "Proceedings of Eurographics Symposium on Parallel Graphics and Visualization", pages = "139--148", keywords = "point clouds, surface reconstruction, point rendering, Auto Splats, KNN search, GPU rendering, point based rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/preiner_2012_AS/", } @article{Auzinger_2012_AAA, title = "Analytic Anti-Aliasing of Linear Functions on Polytopes", author = "Thomas Auzinger and Michael Guthe and Stefan Jeschke", year = "2012", abstract = "This paper presents an analytic formulation for anti-aliased sampling of 2D polygons and 3D polyhedra. Our framework allows the exact evaluation of the convolution integral with a linear function defined on the polytopes. The filter is a spherically symmetric polynomial of any order, supporting approximations to refined variants such as the Mitchell-Netravali filter family. This enables high-quality rasterization of triangles and tetrahedra with linearly interpolated vertex values to regular and non-regular grids. A closed form solution of the convolution is presented and an efficient implementation on the GPU using DirectX and CUDA C is described.", month = may, journal = "Computer Graphics Forum (Proceedings of EUROGRAPHICS 2012)", volume = "31", number = "2", pages = "335--344", keywords = "Polytope, Filter Design, Analytic Anti-Aliasing, Sampling, Integral Formula, Spherically Symmetric Filter, CUDA, Closed Form Solution, 2D 3D", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Auzinger_2012_AAA/", } @article{musialski-2012-icb, title = "Interactive Coherence-Based Fa\c{c}ade Modeling", author = "Przemyslaw Musialski and Michael Wimmer and Peter Wonka", year = "2012", abstract = "We propose a novel interactive framework for modeling building fa\c{c}ades from images. Our method is based on the notion of coherence-based editing which allows exploiting partial symmetries across the fa\c{c}ade at any level of detail. The proposed workflow mixes manual interaction with automatic splitting and grouping operations based on unsupervised cluster analysis. In contrast to previous work, our approach leads to detailed 3D geometric models with up to several thousand regions per fa\c{c}ade. We compare our modeling scheme to others and evaluate our approach in a user study with an experienced user and several novice users.", month = may, journal = "Computer Graphics Forum (Proceedings of EUROGRAPHICS 2012)", volume = "31", number = "2", pages = "661--670", keywords = "facade modeling, urban modeling, facade reconstruction, image-based modeling", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/musialski-2012-icb/", } @inproceedings{musialski-2012-sur, title = "A Survey of Urban Reconstruction", author = "Przemyslaw Musialski and Peter Wonka and Daniel G. Aliaga and Michael Wimmer and Luc van Gool and Werner Purgathofer", year = "2012", abstract = "This paper provides a comprehensive overview of urban reconstruction. While there exists a considerable body of literature, this topic is still an area of very active research.
The work reviewed in this survey stems from the following three research communities: computer graphics, computer vision, and photogrammetry and remote sensing. Our goal is to provide a survey that will help researchers to better position their own work in the context of existing solutions, and to help newcomers and practitioners in computer graphics to quickly gain an overview of this vast field. Further, we would like to encourage even more interdisciplinary work among the mentioned research communities, since the reconstruction problem itself is far from solved.", month = may, booktitle = "EUROGRAPHICS 2012 State of the Art Reports", location = "Cagliari, Sardinia, Italy", publisher = "Eurographics Association", series = "EG STARs", pages = "1--28", keywords = "facade modeling, structure from motion, multi-view stereo, urban reconstruction, inverse-procedural modeling, urban modeling, image-based modeling, city reconstruction, state-of-the-art report", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/musialski-2012-sur/", } @mastersthesis{Birsak-Michael-2012, title = "Coloring Meshes of Archaeological Datasets", author = "Michael Birsak", year = "2012", abstract = "Archaeological monuments are nowadays preserved for future generations by means of digitization. To this end, laser scanners in conjunction with registered cameras are used to gather both the geometric and the color information. The geometry is often triangulated to allow a continuous mapping of the photos onto the geometry. The color mapping for high-quality reconstructions of parts of the models is not without problems. In practice, the photos overlap. Now, assuming that a particular triangle receives color information from just one photo, there is a huge number of possibilities to map the photos onto the triangles. This labeling problem is already covered in the literature. There are also approaches for the leveling of the remaining seams that arise because of the different lighting situations during the exposure of the photos. In this thesis, we improve common labeling approaches by the introduction of an accurate geometry-based occlusion detection. An octree is used to quickly filter out parts of the model that do not come into consideration for an occlusion anyway. The occlusion detection prevents texturing of parts of the model with image material that does not contain the expected region, but the colors of an occluder. Further, a proposed approach for seam leveling on meshes is improved by the introduction of a new term into the least squares problem that corresponds to the proposed leveling approach. This new term penalizes large leveling-function values and helps to keep the leveled color values in the valid range. For better filtering results, we improve the proposed calculation of a 1-pixel-wide boundary around the leveled patches by the introduction of outline normals for a user-defined scale of the patches. For easier manual editing of remaining artifacts in the photos, we introduce an application for the generation of alpha masks that indicate regions of the photos that are used for texturing of the 3D model. For the high-performance visualization of 3D models with a huge number of textures, we make use of virtual texturing. We present an application that generates the needed data structures (atlas and tile store) in significantly less time than existing scripts.
Finally, we show how all the mentioned functionalities are integrated into a visualization application that can support a graphic artist in the post-processing of a digitized archaeological monument.", month = may, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Birsak-Michael-2012/", } @misc{ohrhallinger_stefan-2012-mle, title = "Minimising Longest Edge for Closed Surface Construction from Unorganised 3D Point Sets", author = "Stefan Ohrhallinger and Sudhir Mudur", year = "2012", abstract = "Given an unorganised 3D point set with just coordinate data, we formulate the problem of closed surface construction as one requiring minimisation of the longest edge in triangles, a criterion derivable from Gestalt laws for shape perception. Next we define the Minimum Boundary Complex (BCmin), which resembles the desired surface Bmin considerably, by slightly relaxing the topological constraint to require at least two triangles per edge instead of the exactly two required by Bmin. A close approximation of BCmin can be computed fast using a greedy algorithm. This provides a very good starting shape which can be transformed by a few steps into the desired shape, close to Bmin. Our method runs in O(n log n) time, with Delaunay Graph construction as the largest run-time factor. We show considerable improvement over previous methods, especially for sparse, non-uniform point spacing.", month = may, publisher = "Eurographics Association", location = "Cagliari, Italy", event = "Eurographics 2012 (Best Poster Award)", booktitle = "Poster Proceedings", Conference date = "Poster presented at Eurographics 2012 (Best Poster Award) (2012-05-13--2012-05-18)", note = "25--26", pages = "25--26", keywords = "Point Cloud, Point Set, Reconstruction, Surface Construction", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/ohrhallinger_stefan-2012-mle/", } @WorkshopTalk{Fritz-2012, title = "Landspotting: Social gaming to collect vast amounts of data for satellite validation", author = "Steffen Fritz and Peter Purgathofer and F. Kayali and M. Fellner and Michael Wimmer and Tobias Sturn and Josef Schuh and G. Triebnig and S. Krause and F. Schindler and M. Kollegger and C. Perger and M. D\"{u}rauer and W. Haberl and L. See and Ian McCallum", year = "2012", abstract = "At present there is no single satellite-derived global land cover product that is accurate enough to provide reliable estimates of forest or cropland area to determine, e.g., how much additional land is available to grow biofuels or to tackle problems of food security. The Landspotting Project aims to improve the quality of this land cover information by vastly increasing the amount of in-situ validation data available for calibration and validation of satellite-derived land cover. The Geo-Wiki (Geo-Wiki.org) system currently allows users to compare three satellite-derived land cover products and validate them using Google Earth. However, there is presently no incentive for anyone to provide this data, so the amount of validation through Geo-Wiki has been limited. Yet recent competitions have proven that incentive-driven campaigns can rapidly create large amounts of input. The LandSpotting Project is taking a truly innovative approach through the development of the Landspotting game.
The game engages users whilst simultaneously collecting a large amount of in-situ land cover information. The development of the game is informed by the current raft of successful social gaming that is available on the internet and as mobile applications, many of which are geo-spatial in nature. Games that are integrated within a social networking site such as Facebook illustrate the power to reach and continually engage a large number of individuals. The number of active Facebook users is estimated to be greater than 400 million, of which 100 million are accessing Facebook from mobile devices. The Landspotting Game has game mechanics similar to those of the famous strategy game "Civilization" (i.e. build, harvest, research, war, diplomacy, etc.). When a player wishes to make a settlement, they must first classify the land cover over the area they wish to settle. As the game is played on the Earth's surface with Google Maps, we are able to record and store this land cover/land use classification geographically. Every player can play the game for free (i.e., it is a massively multiplayer online game). Furthermore, it is a social game on Facebook (e.g. invite friends, send friends messages, purchase gifts, help friends, post messages onto the wall, etc.). The game is played in a web browser and therefore runs everywhere Flash is supported, without requiring the user to install anything additional. At the same time, the Geo-Wiki system will be modified to use the acquired in-situ validation information to create new outputs: a hybrid land cover map, which takes the best information from each individual product to create a single integrated version; a database of validation points that will be freely available to the land cover user community; and a facility that allows users to create a specific targeted validation area, which will then be provided to the crowdsourcing community for validation. These outputs will turn Geo-Wiki into a valuable system for earth system scientists.", month = apr, event = "European Geosciences Union General Assembly 2012", location = "Austria Center Vienna, Session ESSI2.9, room 7", keywords = "Social Games with Purpose, Landspotting", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Fritz-2012/", } @techreport{TR-186-2-12-01, title = "Interactive Screen-Space Triangulation for High-Quality Rendering of Point Clouds", author = "Reinhold Preiner and Michael Wimmer", year = "2012", abstract = "This technical report documents work that is a precursor to the Auto Splatting technique. We present a rendering method that reconstructs high-quality images from unorganized colored point data. While previous real-time image reconstruction approaches for point clouds make use of preprocessed data like point radii or normal estimations, our algorithm only requires position and color data as input and produces a reconstructed color image, normal map and depth map which can instantly be used to apply further deferred lighting passes. Our method performs a world-space neighbor search and a subsequent normal estimation in screen-space, and uses the geometry shader to triangulate the color, normal and depth information of the points. To achieve correct visibility and closed surfaces in the projected image, a temporal coherence approach reuses triangulated depth information and provides adaptive neighbor search radii.
Our algorithm is especially suitable for in-situ high-quality visualization of big datasets like 3D scans, making otherwise time-consuming preprocessing steps to reconstruct surface normals or point radii dispensable.", month = apr, number = "TR-186-2-12-01", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", note = "human contact: technical-report@cg.tuwien.ac.at", keywords = "screen triangulation, point rendering, nearest neighbors, screen-space, point clouds", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/TR-186-2-12-01/", } @habilthesis{Bruckner-2012-VEA, title = "Visual Exploration and Analysis of Volumetric Data", author = "Stefan Bruckner", year = "2012", abstract = "Information technology has led to a rapid increase in the amount of data that arise in areas such as biology, medicine, climate science, and engineering. In many cases, these data are volumetric in nature, i.e., they describe the distribution of one or several quantities over a region in space. Volume visualization is the field of research which investigates the transformation of such data sets into images for purposes such as understanding structure or identifying features. This thesis presents work to aid this process by improving the interactive depiction, analysis, and exploration of volumetric data.", month = apr, keywords = "visual analysis, visual exploration, volume data", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Bruckner-2012-VEA/", } @inproceedings{reisner-2012-iwssip, title = "Segmenting Multiple Range Images with Primitive Shapes", author = "Irene Reisner-Kollmann and Stefan Maierhofer", year = "2012", abstract = "We introduce a novel method for automatically segmenting multiple registered range images by detecting and optimizing geometric primitives. The resulting shapes provide high-level information about scanned objects and are a valuable input for surface reconstruction, hole filling, or shape analysis. We begin by generating a global graph of sample points covering all input frames. The graph structure allows computing a globally consistent segmentation with a memory- and time-efficient solution, even for large sets of input images. We iteratively detect shapes with a RANSAC approach, optimize the assignments of graph nodes to shapes, and optimize the shape parameters. Finally, pixel-accurate segmentations can be extracted for each source image individually. By using range images instead of unstructured point clouds as input, we can exploit additional information such as connectivity or varying precision of depth measurements.", month = apr, isbn = "978-3-200-02588-2", location = "Vienna", booktitle = "Proceedings of 19th International Conference on Systems, Signals and Image Processing (IWSSIP 2012)", keywords = "surface fitting, range data, segmentation, shape detection", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/reisner-2012-iwssip/", } @incollection{MATTAUSCH-2012-EOV, title = "Efficient Online Visibility for Shadow Maps", author = "Oliver Mattausch and Jir\'{i} Bittner and Ari Silvennoinen and Daniel Scherzer and Michael Wimmer", year = "2012", abstract = "Standard online occlusion culling is able to vastly improve the rasterization performance of walkthrough applications by identifying large parts of the scene as invisible from the camera and rendering only the visible geometry.
However, it is of little use for the acceleration of shadow map generation (i.e., rasterizing the scene from the light view [Williams 78]), since typically a high percentage of the geometry is visible when rendering shadow maps. For example, in outdoor scenes typical viewpoints are near the ground and therefore have significant occlusion, while light viewpoints are higher up and see most of the geometry. Our algorithm remedies this situation by quickly detecting and culling the geometry that does not contribute to the shadow in the final image.", month = feb, booktitle = "GPU Pro 3: Advanced Rendering Techniques", editor = "Wolfgang Engel", isbn = "978-1439887820", publisher = "CRC Press", keywords = "shadow maps, visibility culling", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/MATTAUSCH-2012-EOV/", } @inproceedings{Purgathofer_2012_IVA, title = "INTERACTIVE VISUAL ANALYSIS OF INTENSIVE CARE UNIT DATA: Relationship Between Serum Sodium Concentration, its Rate of Change, and Survival Outcome", author = "Kresimir Matkovic and H. Gan and Andreas Ammer and D. Bennett and Werner Purgathofer and Marius Terblanche", year = "2012", abstract = "In this paper we present a case study of interactive visual analysis and exploration of a large ICU data set. The data consists of patients’ records containing scalar data representing various patients’ parameters (e.g. gender, age, weight), and time series data describing logged parameters over time (e.g. heart rate, blood pressure). Due to the size and complexity of the data, coupled with limited time and resources, such ICU data is often not utilized to its full potential, although its analysis could contribute to a better understanding of physiological, pathological and therapeutic processes, and consequently lead to an improvement of medical care. During the exploration of this data we identified several analysis tasks and adapted and improved a coordinated multiple views system accordingly. Besides a curve view which also supports time series with gaps, we introduced a summary view which allows an easy comparison of subsets of the data, and a box plot view in a coordinated multiple views setup. Furthermore, we introduced an inverse brush, a secondary brush which automatically selects non-brushed items, and updates itself accordingly when the original brush is modified. The case study describes how we used the system to analyze data from 1447 patients from the ICU at Guy’s & St. Thomas’ NHS Foundation Trust in London. We were interested in the relationship between serum sodium concentration, its rate of change and their effect on ICU mortality rates. The interactive visual analysis led us to findings which were fascinating for medical experts, and which would be very difficult to discover using conventional analysis methods usually applied in the medical field.
The overall feedback from domain experts (coauthors of the paper) is very positive.", month = feb, location = "Rome, Italy", event = "IVAPP 2012", booktitle = "Proceedings of IVAPP 2012", journal = "Proceedings of IVAPP 2012 / SciTePress", pages = "648--659", keywords = "Coordinated Multiple Views, Intensive Care Unit Data, Interactive Visual Analysis", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Purgathofer_2012_IVA/", } @article{liu-2012-tcvd, title = "Tensor Completion for Estimating Missing Values in Visual Data", author = "Ji Liu and Przemyslaw Musialski and Peter Wonka and Jieping Ye", year = "2012", abstract = "In this paper we propose an algorithm to estimate missing values in tensors of visual data. Our methodology is built on recent studies about matrix completion using the matrix trace norm. The contribution of our paper is to extend the matrix case to the tensor case by proposing the first definition of the trace norm for tensors and then by building a working algorithm. First, we propose a definition for the tensor trace norm that generalizes the established definition of the matrix trace norm. Second, similar to matrix completion, the tensor completion is formulated as a convex optimization problem. We developed three algorithms: SiLRTC, FaLRTC, and HaLRTC. The SiLRTC algorithm is simple to implement and employs a relaxation technique to separate the dependent relationships and uses the block coordinate descent (BCD) method to achieve a globally optimal solution; the FaLRTC algorithm utilizes a smoothing scheme to transform the original nonsmooth problem into a smooth one; the HaLRTC algorithm applies the alternating direction method of multipliers (ADMM) to our problem. Our experiments show potential applications of our algorithms and the quantitative evaluation indicates that our methods are more accurate and robust than heuristic approaches.", month = jan, issn = "0162-8828", journal = "IEEE Transactions on Pattern Analysis & Machine Intelligence (PAMI)", number = "1", volume = "35", pages = "208--220", keywords = "matrix completion, trace norm, tensor completion", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/liu-2012-tcvd/", } @xmascard{xmas-2012, title = "X-Mas Card 2012", author = "Peter Mindek", year = "2012", abstract = "The foreground is dominated by a hand-drawn Christmas-decorated Mayan calendar. The Christmas ornaments are ray-traced. The sky is created by combining a visualization of the hurricane Isabel dataset with a hand-drawn background. The visualization of the hurricane dataset is a combination of several direct volume rendered images of various scalar fields. The Vienna skyline is composed of curves while a simple procedural texture is used to generate the windows. The flames are hand-drawn.
", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/xmas-2012/", } @inproceedings{Ford-2012-HRV, title = "HeartPad: Real-Time Visual Guidance for Cardiac Ultrasound", author = "Steven Ford and Gabriel Kiss and Ivan Viola and Stefan Bruckner and Hans Torp", year = "2012", abstract = "Medical ultrasound is a challenging modality when it comes to image interpretation. The goal we address in this work is to assist the ultrasound examiner and partially alleviate the burden of interpretation. We propose to address this goal with visualization that provides clear cues on the orientation and the correspondence between anatomy and the data being imaged. Our system analyzes the stream of 3D ultrasound data and in real-time identifies distinct features that are the basis for a dynamically deformed mesh model of the heart. The heart mesh is composited with the original ultrasound data to create the data-to-anatomy correspondence. The visualization is broadcast over the internet, allowing, among other opportunities, a direct visualization on the patient on a tablet computer. The examiner interacts with the transducer and with the visualization parameters on the tablet. Our system has been characterized by domain specialists as useful in medical training and for helping occasional ultrasound users navigate.", booktitle = "Proceedings of the Workshop at SIGGRAPH Asia 2012", keywords = "medical visualization, ultrasound", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Ford-2012-HRV/", } @incollection{Reichinger-12, title = "Computer-Aided Design of Tactile Models – Taxonomy and Case-Studies", author = "Andreas Reichinger and Florian Rist and M. Neum\"{u}ller and Stefan Maierhofer and Werner Purgathofer", year = "2012", abstract = "Computer-aided tools offer great potential for the design and production of tactile models. While many publications focus on the design of essentially two-dimensional media like raised line drawings or the reproduction of three-dimensional objects, we intend to broaden this view by introducing a taxonomy that classifies the full range of conversion possibilities based on dimensionality. We present an overview of current methods, discuss specific advantages and difficulties, identify suitable programs and algorithms and discuss personal experiences from case studies performed in cooperation with two museums.", booktitle = "Computers Helping People with Special Needs", chapter = "13th International Conference, ICCHP 2012, Linz, Austria, July 11-13, 2012, Proceedings, Part II", editor = "K. Miesenberger and A. Karshmer and P. Penaz and W. Zagler", isbn = "978-3-642-31533-6", publisher = "Springer Berlin-Heidelberg", series = "Lecture Notes in Computer Science", volume = "7383", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Reichinger-12/", } @studentproject{WEBER-2012-MIC, title = "Micropolis", author = "Thomas Weber", year = "2012", abstract = "Micropolis is a micropolygon rasterizer implemented in OpenCL. It uses the REYES algorithm to rasterize curved surfaces. This is done by splitting the surface into sub-pixel sized polygons (micropolygons) and rasterizing them. This allows the rendering of highly detailed, displaced surfaces. The dicing, shading and rasterization of the micropolygons is implemented in OpenCL. The rasterizer fills a framebuffer that is then rendered as a texture in OpenGL. There also exists an alternative render backend that uses OpenGL hardware tessellation for performance comparison.
The code is available on GitHub: https://github.com/ginkgo/micropolis A video is available on YouTube: http://www.youtube.com/watch?v=09ozb1ttgmA ", keywords = "micropolygon rendering, reyes", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/WEBER-2012-MIC/", } @mastersthesis{ludwig-2012-MT, title = "Radial Diagrams for the Visual Analysis of Wind Energy Production Data", author = "Wolfgang Ludwig", year = "2018", abstract = "Wind energy production is a fast-growing sector in the field of renewable energy production. In the process of energy production, more and more data is produced and recorded every year. This data is usually worthless without further exploration, analysis, and presentation. This thesis presents a design study of the visual analysis of wind energy production data. The goal is to provide data analysts with tools to carry out common tasks in the field of wind energy production more efficiently. As the data commonly contains directional information of winds and gusts, analysis techniques need to take the circular nature of such data into account. This work proposes a set of techniques for the visualization of and interaction with circular data in radial diagrams. The diagrams operate in the polar coordinate system and thus are well suited to solve the problems of maintaining the natural coherence and circular closure of circular data. The thesis discusses important design decisions and gives practical guidance on how to implement novel features into an existing software system. Implementation details on how to ensure large data scalability are presented. The work evaluates the results in a case study with real data carried out by an expert in the field of wind energy production. The results indicate an improved work flow for common tasks and a successful system integration. The reported deployment at a national power grid operator further demonstrates the system’s user acceptance and importance. The thesis also reflects on the iterative design process and the expert feedback collected throughout.", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/ludwig-2012-MT/", } @phdthesis{muigg-2012-svr, title = "Scalability for Volume Rendering and Information Visualization Approaches in the Context of Scientific Data", author = "Philipp Muigg", year = "2012", abstract = "Data from numerical simulations that model physical processes has to be explored and analyzed in a broad range of different fields of research and development. Besides data mining and statistics, visualization is among the most important methods that grant domain experts insight into their complex simulation results. In order to keep up with ongoing improvements of simulation methods as well as ever increasing amounts of data, state-of-the-art visualization techniques have to be scalable with respect to many different properties. Many numerical models rely on a domain decomposition defined by a volumetric grid. Finer grids yield more accurate simulation results at the cost of longer computing times. The wide availability of high-performance computing resources has resulted in increasingly detailed data sets. The first volume rendering approach that is presented in this thesis uses bricking and resampling to cope with such high resolution data.
Important regions of the simulated volume are visualized in as much detail as possible, whereas lower resolution representations are used for less important portions of a data set. This allows for interactive frame rates even when dealing with the highly detailed grids that are used by state-of-the-art simulation models. Grid resolution, however, is only one aspect that has increased due to the ongoing development of numerical methods. Grid complexity has increased as well. While initial simulation techniques have required simple tetrahedral meshes, current methods can cope with polyhedral cells that allow for increased solver efficiency and simulation accuracy. The second volume visualization algorithm that is presented in this thesis is scalable with respect to grid complexity since it is capable of directly visualizing data defined on grids which comprise polyhedral cells. Raycasting is performed by using a novel data structure that allows for easy grid traversal while retaining a very compact memory footprint. Both aforementioned volume rendering techniques utilize the massively parallel computing resources that are provided by modern graphics processing units. Many information visualization methods are designed to explore and analyze abstract data that is often high dimensional. Since improvements in the field of numerical modelling have led to simulation data sets that contain a large number of physical attributes, the application of techniques from the field of information visualization can provide additional important information to domain experts. However, in order to apply information visualization methods to scientific data such as numerical simulation results, additional scalability issues have to be addressed. This thesis introduces multiple methods that can be used to reduce cluttering and overdrawing problems for line-based techniques such as phase-space diagrams, parallel coordinates and a novel time-series visualization. The trajectories of important trends in the data are illustrated by blurring a noise texture along them. A novel coloring scheme is used to provide visual linking-information across multiple visualizations in a multi-view framework. The proposed approaches are primarily image-based which makes them very scalable with respect to data set sizes. The usefulness and real-world applicability of the techniques that are introduced in this thesis is demonstrated in a case study. A complex computational fluid dynamics data set, which contains several simulated breathing cycles within the human upper respiratory tract, is analyzed. The exploration of the data has yielded several hypotheses that are of importance to an ENT specialist.
Many of the techniques presented in this work have also been used in the context of additional collaborations in a multitude of fields such as medicine, climatology, meteorology, and engineering.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/muigg-2012-svr/", } @talk{Groeller_2012_VV, title = "Variability in Visualization", author = "Eduard Gr\"{o}ller", year = "2012", event = "Computer Graphics and HCI Group, Technische Universit\"{a}t Kaiserslautern", location = "Germany", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Groeller_2012_VV/", } @article{Ribicic_2012_VAS, title = "Visual analysis and steering of flooding simulations", author = "Hrvoje Ribi\v{c}i\'{c} and J\"{u}rgen Waser and Raphael Fuchs and G\"{u}nter Bl\"{o}schl and Eduard Gr\"{o}ller", year = "2012", abstract = "We present a visualization tool for the real-time analysis of interactively steered ensemble-simulation runs, and apply it to flooding simulations. Simulations are performed on-the-fly, generating large quantities of data. The user wants to make sense of the data as it is created. The tool facilitates understanding of what happens in all scenarios, where important events occur, and how simulation runs are related. We combine different approaches to achieve this goal. To maintain an overview, data is aggregated and embedded into the simulation rendering, showing trends, outliers, and robustness. For a detailed view, we use information-visualization views and interactive visual analysis techniques. A selection mechanism connects the two approaches. Points of interest are selected by clicking on aggregates, supplying data for visual analysis. This allows the user to maintain an overview of the ensemble and perform analysis even as new data is supplied through simulation steering. Unexpected or unwanted developments are detected easily, and the user can focus the exploration on them. The solution was evaluated with two case studies focusing on placing and testing flood defense measures. Both were evaluated by a consortium of flood simulation and defense experts, who found the system to be both intuitive and relevant.", journal = "IEEE Transaction on Visualization and Computer Graphics", number = "99", volume = "PP", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Ribicic_2012_VAS/", } @inproceedings{EISEMANN-2012-ERT, title = "Efficient Real-Time Shadows", author = "Elmar Eisemann and Ulf Assarsson and Michael Schwarz and Michal Valient and Michael Wimmer", year = "2012", abstract = "This course is a resource for applying efficient, real-time shadow algorithms. It builds on a solid foundation (previous courses at SIGGRAPH Asia 2009 and Eurographics 2010, including comprehensive course notes) and the 2011 book Real-Time Shadows (AK Peters) written by four of the presenters.
The book is a compendium of many topics in the realm of shadow computation.", booktitle = "ACM SIGGRAPH 2012 Courses", isbn = "978-1-4503-1678-1", location = "Los Angeles, CA", publisher = "ACM", pages = "18:1--18:53", keywords = "real-time rendering, shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/EISEMANN-2012-ERT/", } @talk{Groeller_2012_KAV, title = "Knowledge-Assisted Visualization and Biopsy Planning", author = "Eduard Gr\"{o}ller", year = "2012", event = "Mini-Symposium on Medical Visualization, University of Bergen", location = "Norway", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Groeller_2012_KAV/", } @talk{Groeller_2012_VBP, title = "Visualization in Biopsy Planning", author = "Eduard Gr\"{o}ller", year = "2012", event = "Medical Visualization Minisymposium, Eindhoven University of Technology", location = "The Netherlands", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Groeller_2012_VBP/", } @talk{Groeller_2012_ViV, title = "Variability in Visualization", author = "Eduard Gr\"{o}ller", year = "2012", event = "Computer Graphics and Visualization Group, Technische Universit\"{a}t M\"{u}nchen", location = "Germany", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Groeller_2012_ViV/", } @studentproject{dellmour-2012-mcr, title = "Determining the comfort range of customer stereoscopic displays", author = "Camillo Dellmour", year = "2012", abstract = "An experiment from the vision literature was reproduced to investigate whether and how accurately we can determine the comfort zone where the vergence-accommodation conflict is low for customer stereo displays (anaglyph + autostereo).", keywords = "Vergence-accommodation conflict, Stereoscopy", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/dellmour-2012-mcr/", } @bachelorsthesis{FISCHL-2012-CTASEG, title = "Parallelized Segmentation of CT-Angiography Datasets Using CUDA", author = "Daniel Fischl", year = "2012", abstract = "Segmentation of CT-Angiography datasets is an important and difficult task. Several algorithms and approaches have already been invented and implemented to solve this problem. In this work, we present automatic algorithms for the segmentation of these CTA datasets, implemented in CUDA, and evaluate our results regarding speed and error rates. Starting with local approaches like thresholding, we proceed to global, object-based algorithms, like region growing and a newly developed algorithm based on dual energy CT scans (DECT), the XOR-Algorithm, presented by Karimov et al. [6] A limitation of using graphics hardware is the restricted amount of memory, which led us to use a slab-based processing approach (see section 5.3). The requirement of this work was a complete GPU implementation. But since not every task is appropriate for parallelization, it was necessary to use iteratively parallel algorithms. This strategy, though, introduced speed problems that had to be analyzed and were partly solved. This work presents the principle of these GPU methods and compares them to their CPU counterparts. In the end, the quality of each algorithm is analyzed and the algorithms are compared against each other, in order to find an acceptable completely automatic segmentation algorithm for distinguishing between different types of tissues (e.g.
vessels, bones, soft tissue, ...).", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/FISCHL-2012-CTASEG/", } @studentproject{Tragust-2012-GMHPC, title = "Graph Models for Guided Point Cloud Navigation", author = "Markus Tragust", year = "2012", abstract = "Navigation in huge point cloud models can be difficult to the extent that users get lost within the point cloud. In this project, graphs are manually defined that describe major paths through the model which the user cannot leave. The user is guided along the paths to the most important areas of the point cloud.", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Tragust-2012-GMHPC/", } @studentproject{trumpf_stefan_2012, title = "Labeling and Leveling of Meshes", author = "Stefan Trumpf", year = "2012", abstract = "The aim of this project was the extension of the functional range of an existing library used for labeling. Labeling in this context refers to the problem of texturing a mesh of an archaeological item, given a huge set of photos, such that there is a vast number of possibilities to choose a suitable photo for each triangle. The first step was the implementation of a masking feature. This feature is necessary, since not all regions in the photos are equally suited to texture the mesh. The masks are provided by the user of the library and show the regions in the photos that can be used for texturing of the model. The second step was the implementation of an interactive brushing tool. This brushing tool enables the user to enlarge the region that is textured by a particular photo (growing brush), or to replace the photo currently used for texturing a particular region with the next best suited photo. The brushing tool was integrated into Scanopy, a point cloud editor that is developed at the institute.", keywords = "Meshes, Leveling, Labeling", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/trumpf_stefan_2012/", } @inproceedings{mistelbauer-2012-cr, title = "Centerline Reformations of Complex Vascular Structures", author = "Gabriel Mistelbauer and Andrej Varchola and Hamed Bouzari and Juraj Starinsky and Arnold K\"{o}chl and R\"{u}diger Schernthaner and Dominik Fleischmann and Eduard Gr\"{o}ller and Milo\v{s} \v{S}r\'{a}mek", year = "2012", abstract = "Visualization of vascular structures is a common and frequently performed task in the field of medical imaging. There exist well-established and applicable methods such as Maximum Intensity Projection (MIP) and Curved Planar Reformation (CPR). However, when calcified vessel walls are investigated, occlusion hinders exploration of the vessel interior with MIP. In contrast, CPR offers the possibility to visualize the vessel lumen by cutting a single vessel along its centerline. Extending the idea of CPR, we propose a novel technique, called Centerline Reformation (CR), which is capable of visualizing the lumen of spatially arbitrarily oriented vessels not necessarily connected in a tree structure. In order to visually emphasize depth, overlap and occlusion, halos can optionally envelope the vessel lumen. The required vessel centerlines are obtained from volumetric data by performing a scale-space based feature extraction. We present the application of the proposed technique in a focus and context setup.
Further, we demonstrate how it facilitates the investigation of dense vascular structures, particularly cervical vessels or vessel data featuring peripheral arterial occlusive diseases or pulmonary embolisms. Finally, feedback from domain experts is given.", isbn = "978-1-4673-0863-2", location = "Songdo, Korea (South)", booktitle = "Pacific Visualization Symposium (PacificVis), 2012 IEEE", pages = "233--240", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/mistelbauer-2012-cr/", } @talk{musialski_2012_aachen, title = "Image-Based Approaches for Facade Reconstruction", author = "Przemyslaw Musialski", year = "2012", abstract = "Modeling and reconstruction of urban environments is currently the subject of intensive research. There is a wide range of possible applications, like cyber-tourism, computer games, and the entertainment industries in general, as well as urban planning, architecture, traffic simulation, and driving guidance, to name but a few. In this talk I will present one specific subfield of urban reconstruction: image-based facade modeling and reconstruction. In particular I will provide an overview of recent automatic and interactive approaches which aim at the decomposition of facade imagery in order to generate well-defined geometric models.", event = "Invited talk at RWTH Aachen University, Computer Graphics Group", location = "RWTH Aachen, Germany", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/musialski_2012_aachen/", } @studentproject{manpreet_kainth-2012-rus, title = "Rendering of Urban Scenes", author = "Manpreet Kainth", year = "2012", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/manpreet_kainth-2012-rus/", } @talk{mindek_peter-2012-visgroupproj, title = "VisGroup: visualization research projects", author = "Peter Mindek", year = "2012", event = "Vision and Graphics Group Seminar", location = "Slovak University of Technology in Bratislava, Faculty of Informatics and Information Technologies", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/mindek_peter-2012-visgroupproj/", }