@article{vaico, title = "VAICo: Visual Analysis for Image Comparison", author = "Johanna Schmidt and Eduard Gr\"{o}ller and Stefan Bruckner", year = "2013", abstract = "Scientists, engineers, and analysts are confronted with ever larger and more complex sets of data, whose analysis poses special challenges. In many situations it is necessary to compare two or more datasets. Hence there is a need for comparative visualization tools to help analyze differences or similarities among datasets. In this paper an approach for comparative visualization for sets of images is presented. Well-established techniques for comparing images frequently place them side-by-side. A major drawback of such approaches is that they do not scale well. Other image comparison methods encode differences in images by abstract parameters like color. In this case information about the underlying image data gets lost. This paper introduces a new method for visualizing differences and similarities in large sets of images which preserves contextual information, but also allows the detailed analysis of subtle variations. Our approach identifies local changes and applies cluster analysis techniques to embed them in a hierarchy. The results of this process are then presented in an interactive web application which allows users to rapidly explore the space of differences and drill-down on particular features. We demonstrate the flexibility of our approach by applying it to multiple distinct domains.", month = dec, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "19", number = "12", note = "Demo: http://www.cg.tuwien.ac.at/~jschmidt/vaico/", pages = "2090--2099", keywords = "focus+context, image-set comparison, Comparative visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/vaico/", } @article{reh-2013, title = "MObjects - A Novel Method for the Visualization and Interactive Exploration of Defects in Industrial XCT Data", author = "Andreas Reh and Christian Gusenbauer and Johann Kastner and Eduard Gr\"{o}ller and Christoph Heinzl", year = "2013", abstract = "This paper describes an advanced visualization method for the analysis of defects in industrial 3D X-Ray Computed Tomography (XCT) data. We present a novel way to explore a high number of individual objects in a dataset, e.g., pores, inclusions, particles, fibers, and cracks demonstrated on the special application area of pore extraction in carbon fiber reinforced polymers (CFRP). After calculating the individual object properties volume, dimensions and shape factors, all objects are clustered into a mean object (MObject). The resulting MObject parameter space can be explored interactively. To do so, we introduce the visualization of mean object sets (MObject Sets) in a radial and a parallel arrangement. Each MObject may be split up into sub-classes by selecting a specific property, e.g., volume or shape factor, and the desired number of classes. Applying this interactive selection iteratively leads to the intended classifications and visualizations of MObjects along the selected analysis path. Hereby the given different scaling factors of the MObjects down the analysis path are visualized through a visual linking approach. Furthermore the representative MObjects are exported as volumetric datasets to serve as input for successive calculations and simulations. In the field of porosity determination in CFRP non-destructive testing practitioners use representative MObjects to improve ultrasonic calibration curves. 
Representative pores also serve as input for heat conduction simulations in active thermography. For a fast overview of the pore properties in a dataset we propose a local MObjects visualization in combination with a color-coded homogeneity visualization of cells. The advantages of our novel approach are demonstrated using real world CFRP specimens. The results were evaluated through a questionnaire in order to determine the practicality of the MObjects visualization as a supportive tool for domain specialists.", month = dec, journal = "IEEE Transactions on Visualization and Computer Graphics (Proceedings of IEEE Scientific Visualization 2013)", volume = "19", number = "12", pages = "2906--2915", keywords = "porosity, carbon fiber reinforced polymers, parameter space analysis, MObjects, 3D X-ray computed tomography", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/reh-2013/", } @article{kehrer-2013-SBC, title = "A Model for Structure-based Comparison of Many Categories in Small-Multiple Displays", author = "Johannes Kehrer and Harald Piringer and Wolfgang Berger and Eduard Gr\"{o}ller", year = "2013", abstract = "Many application domains deal with multi-variate data that consists of both categorical and numerical information. Small-multiple displays are a powerful concept for comparing such data by juxtaposition. For comparison by overlay or by explicit encoding of computed differences, however, a specification of references is necessary. In this paper, we present a formal model for defining semantically meaningful comparisons between many categories in a small-multiple display. Based on pivotized data that are hierarchically partitioned by the categories assigned to the x and y axis of the display, we propose two alternatives for structure-based comparison within this hierarchy. With an absolute reference specification, categories are compared to a fixed reference category. With a relative reference specification, in contrast, a semantic ordering of the categories is considered when comparing them either to the previous or subsequent category each. Both reference specifications can be defined at multiple levels of the hierarchy (including aggregated summaries), enabling a multitude of useful comparisons. We demonstrate the general applicability of our model in several application examples using different visualizations that compare data by overlay or explicit encoding of differences.", month = dec, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "19", number = "12", pages = "2287--2296", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/kehrer-2013-SBC/", } @phdthesis{knecht_2013_RSM, title = "Reciprocal Shading for Mixed Reality", author = "Martin Knecht", year = "2013", abstract = "Reciprocal shading for mixed reality aims to integrate virtual objects into real environments in a way that they are in the ideal case indistinguishable from real objects. It is therefore an attractive technology for architectural visualizations, product visualizations and for cultural heritage sites, where virtual objects should be seamlessly merged with real ones. Due to the improved performance of recent graphics hardware, real-time global illumination algorithms are feasible for mixed-reality applications, and thus more and more researchers address realistic rendering for mixed reality. The goal of this thesis is to provide algorithms which improve the visual plausibility of virtual objects in mixed-reality applications. 
Our contributions are as follows: First, we present five methods to reconstruct the real surrounding environment. In particular, we present two methods for geometry reconstruction, a method for material estimation at interactive frame rates and two methods to reconstruct the color mapping characteristics of the video see-through camera. Second, we present two methods to improve the visual appearance of virtual objects. The first, called differential instant radiosity, combines differential rendering with a global illumination method called instant radiosity to simulate reciprocal shading effects such as shadowing and indirect illumination between real and virtual objects. The second method focuses on the visually plausible rendering of reflective and refractive objects. The high-frequency lighting effects caused by these objects are also simulated with our method. The third part of this thesis presents two user studies which evaluate the influence of the presented rendering methods on human perception. The first user study measured task performance with respect to the rendering mode, and the second user study was set up as a web survey where participants had to choose which of two presented images, showing mixed-reality scenes, they preferred.", month = dec, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/knecht_2013_RSM/", } @bachelorsthesis{koehle-2013-sgv, title = "Spatiotemporal genealogy visualization", author = "Dominik K\"{o}hle", year = "2013", abstract = "The focus of conventional genealogy visualization techniques lies on showing generational and perhaps temporal relationships. A family tree, for instance, perfectly shows dates of birth and death of a person, who is the child of whom, as well as marriages and divorces. But to fully understand an individual’s life, it is important to know some more facts: Where was the person born? When did she or he leave home? How often and where to did that person move? Where did people meet? We propose a combination of a space-time cube and a timeline visualization to be able to actually see a person’s life. The timeline visualization shows generational and temporal relationships, where the space-time cube visualizes spatial relationships and movements of a single person as well as those of whole families. Event clouds (additional information layers for historic events) can further show possible connections between such events and migration of a person/family. ", month = dec, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "spatio-temporal visualization, genealogy ", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/koehle-2013-sgv/", } @bachelorsthesis{hauer_alex-2013-ba, title = "Physics-based Music Visualization", author = "Alex Hauer", year = "2013", abstract = "The aim of this bachelor’s thesis is to point out ways on how to extract distinct bits of information out of a song and how to combine them to create single parameters that reflect the currently transported emotion of the song. 
It presents approaches on how to extract certain information and data from MIDI and audio files that can then be used to create a more physics-based and naturally feeling visualization than the one that gets shipped with today’s common music player software, with a strong focus on MIDI. For example, the currently used scale should have an impact on the visualization’s color, as well as the current tempo, dynamic or aggressivity. Representing these attributes as input parameters that can be used by a visualization application should ultimately result in a better visualization experience for the viewer, because it creates a feeling that the things seen on screen match with the music currently playing. Besides defining such input parameters for visualizations, this paper also provides a short evaluation of music feature extraction libraries and frameworks that help in reaching the mentioned goal, as well as a few concrete implementations of algorithms that can be used to extract such features based on the jMusic API framework.", month = dec, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "feature extraction, music, visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/hauer_alex-2013-ba/", } @inproceedings{Calatrava_2013_GPGPUSOA, title = "General-Purpose Graphics Processing Units in Service-Oriented Architectures", author = "Mar\'{i}a del Carmen Calatrava Moreno and Thomas Auzinger", year = "2013", abstract = "Over the last decades, graphics processing units have developed from special-purpose graphics accelerators to general-purpose massively parallel co-processors. In recent years they gained increased traction in high performance computing as they provide superior computational performance in terms of runtime and energy consumption for a wide range of problems. In this survey, we review their employment in distributed computing for a broad range of application scenarios. Common characteristics and a classification of the most relevant use cases are described. Furthermore, we discuss possible future developments of the use of general purpose graphics processing units in the area of service-oriented architecture. The aim of this work is to inspire future research in this field and to give guidelines on when and how to incorporate this new hardware technology.", month = dec, isbn = "978-1-4799-2701-2", series = "SOCA ", publisher = "IEEE Computer Society", organization = "IEEE Computer Society", location = "Kauai", booktitle = "Proceedings of the 6th IEEE International Conference on Service Oriented Computing and Applications", keywords = "GPGPU, SOA, parallel, graphics processors, GPU, service-oriented architectures, throughput computing, survey, future", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Calatrava_2013_GPGPUSOA/", } @bachelorsthesis{giefing_juergen-2013-ba, title = "Physics-based Music Visualization", author = "Juergen Giefing", year = "2013", abstract = "Historically, there have been a number of approaches to analyze and explain the relationship between music and visual elements, particularly colors. Arnheim [1] has done important work in that context, as well as Palmer and Schloss [10] [16] [14] [13]. Palmer has shown in his experiments, that music and color are coupled through emotion, like Arnheim had assumed before. 
The goal of this thesis was to investigate this connection more in detail by considering also other visual parameters like motion, shape or size and to implement a prototype of a visualization application based on the insights gathered during our user studies. This application should be able to visualize music based on a flexible mapping and psychological knowledge. During the first part of our user studies, test persons were asked to rate parts of songs as well as animations without sound independent from each other but using the same rating scales. The results show strong correlations between single attributes and the perception of the test persons. During the second part of the user studies, the test persons were asked to rate the accordance between the songs from the first round and the visualizations created based on the results of the first round. Our assumptions could not be confirmed in that experiment. We try to determine the reasons, why the results of the second round were not as expected and what steps could be taken to refine our approach and implement it in a successful manner.", month = dec, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "music, visualization, emotions", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/giefing_juergen-2013-ba/", } @bachelorsthesis{schmid_andreas_2013-prj, title = "Physics-based Music Visualization", author = "Andreas Schmid", year = "2013", abstract = "The aim of our bachelor thesis was to develop a concept on the basic structure of software that visualizes human emotions when listening to music into graphic primitives. Our prototype of this visual media player allows the user to read data from a music file and map this data into a file that converts it to special forms, colors and transformations. This goal was accomplished with special commands and a modifiable theme. With these tools, the user can completely control the visualization. The commands describe the options of what can be accomplished with the software. In our case, they create primitive forms, move them, and change their color. The theme shows what the program is able to display. Two examples of this prototype are the textual theme and the graphical theme. In the first user study, the users were asked to listen to short music files or watch some short animations and to evaluate those. This data was then transferred to a mapping file and taken as basis for a second user study. In the second user study, the users had to evaluate five media players, including ours, using two different mappings - one was their visualization and the other an evaluation on if music and visualization match. The results were not as expected - the users evaluated the existing media players better than our prototype in visualization-music mapping. We are analyzing the results and working on redesigning our algorithms in order to have a more successful prototype. In this bachelor thesis, there is also an analysis of the libraries used. 
The prototype can be used as a basis for future work in this field.", month = dec, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "sound, visualization, physics-based", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/schmid_andreas_2013-prj/", } @article{ohrhallinger_stefan-2013-c2d, title = "An Efficient Algorithm for Determining an Aesthetic Shape Connecting Unorganised 2D Points", author = "Stefan Ohrhallinger and Sudhir Mudur", year = "2013", abstract = "We present an efficient algorithm for determining an aesthetically pleasing shape boundary connecting all the points in a given unorganised set of 2D points, with no other information than point coordinates. By posing shape construction as a minimisation problem which follows the Gestalt laws, our desired shape Bmin is non-intersecting, interpolates all points and minimises a criterion related to these laws. The basis for our algorithm is an initial graph, an extension of the Euclidean minimum spanning tree but with no leaf nodes, called as the minimum boundary complex BCmin. BCmin and Bmin can be expressed similarly by parametrising a topological constraint. A close approximation of BCmin, termed BC0 can be computed fast using a greedy algorithm. BC0 is then transformed into a closed interpolating boundary Bout in two steps to satisfy Bmin’s topological and minimization requirements. Computing Bmin exactly is an NP-hard problem, whereas Bout is computed in linearithmic time. We present many examples showing considerable improvement over previous techniques, especially for shapes with sharp corners. Source code is available online.", month = dec, journal = "Computer Graphics Forum", volume = "32", number = "8", pages = "72--88", keywords = "curve reconstruction, boundary representation, sampling condition, computational geometry", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/ohrhallinger_stefan-2013-c2d/", } @article{Auzinger_Mistelbauer_2013_CSR, title = "Vessel Visualization using Curved Surface Reformation", author = "Thomas Auzinger and Gabriel Mistelbauer and Ivan Baclija and R\"{u}diger Schernthaner and Arnold K\"{o}chl and Michael Wimmer and Eduard Gr\"{o}ller and Stefan Bruckner", year = "2013", abstract = "Visualizations of vascular structures are frequently used in radiological investigations to detect and analyze vascular diseases. Obstructions of the blood flow through a vessel are one of the main interests of physicians, and several methods have been proposed to aid the visual assessment of calcifications on vessel walls. Curved Planar Reformation (CPR) is a wide-spread method that is designed for peripheral arteries which exhibit one dominant direction. To analyze the lumen of arbitrarily oriented vessels, Centerline Reformation (CR) has been proposed. Both methods project the vascular structures into 2D image space in order to reconstruct the vessel lumen. In this paper, we propose Curved Surface Reformation (CSR), a technique that computes the vessel lumen fully in 3D. This offers high-quality interactive visualizations of vessel lumina and does not suffer from problems of earlier methods such as ambiguous visibility cues or premature discretization of centerline data. Our method maintains exact visibility information until the final query of the 3D lumina data. 
We also present feedback from several domain experts.", month = dec, journal = "IEEE Transactions on Visualization and Computer Graphics (Proceedings of IEEE Scientific Visualization 2013)", volume = "19", number = "12", pages = "2858--2867", keywords = "Surface Approximation, Vessel, Reformation, Volume Rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Auzinger_Mistelbauer_2013_CSR/", } @article{Viola_Ivan_2013_VCA, title = "Visual cavity analysis in molecular simulations", author = "Julius Parulek and Cagatay Turkay and Nathalie Reuter and Ivan Viola", year = "2013", abstract = "Molecular surfaces provide a useful mean for analyzing interactions between biomolecules; such as identification and characterization of ligand binding sites to a host macromolecule. We present a novel technique, which extracts potential binding sites, represented by cavities, and characterize them by 3D graphs and by amino acids. The binding sites are extracted using an implicit function sampling and graph algorithms. We propose an advanced cavity exploration technique based on the graph parameters and associated amino acids. Additionally, we interactively visualize the graphs in the context of the molecular surface. We apply our method to the analysis of MD simulations of Proteinase 3, where we verify the previously described cavities and suggest a new potential cavity to be studied.", month = nov, issn = "1471-2105", journal = "BMC Bioinformatics", number = "Suppl 19:S4 ", volume = "14", pages = "1--15", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Viola_Ivan_2013_VCA/", } @phdthesis{mistelbauer_2013_SIV, title = "Smart Interactive Vessel Visualization in Radiology ", author = "Gabriel Mistelbauer", year = "2013", abstract = "Cardiovascular diseases occur with increasing frequency in our society. Their diagnosis often requires tailored visualization techniques, e.g., to examine the blood flow channel in case of luminal narrowing. Curved Planar Reformation (CPR) addresses this field by creating longitudinal sections along the centerline of blood vessels. With the possibility to rotate around an axis, the entire vessel can be assessed for possible vascular abnormalities (e.g., calcifications on the vessel wall, stenoses, and occlusions). In this thesis, we present a visualization technique, called Centerline Reformation (CR), that offers the possibility to investigate the interior of any blood vessel, regardless of its spatial orientation. Starting from the projected vessel centerlines, the lumen of any vessel is generated by employing wavefront propagation in image space. The vessel lumen can be optionally delineated by halos, to enhance spatial relationships when examining a dense vasculature. We present our method in a focus+context setup, by rendering a different kind of visualization around the lumen. We explain how to resolve correct visibility of multiple overlapping vessels in image space. Additionally, our visualization method allows the examination of a complex vasculature by means of interactive vessel filtering and subsequent visual querying. We propose an improved version of the Centerline Reformation (CR) technique, by generating a completely three-dimensional reformation of vascular structures using ray casting. We call this process Curved Surface Reformation (CSR). In this method, the cut surface is smoothly extended into the surrounding tissue of the blood vessels. 
Moreover, automatically generated cutaways reveal as much of the vessel lumen as possible, while still retaining correct visibility. This technique offers unrestricted navigation within the inspected vasculature and allows diagnosis of any tubular structure, regardless of its spatial orientation. The growing amount of data requires increasing knowledge from a user in order to select the appropriate visualization method for their analysis. In this thesis, we present an approach that externalizes the knowledge of domain experts in a human readable form and employs an inference system to provide only suitable visualization techniques for clinical diagnosis, namely Smart Super Views. We discuss the visual representation of such automatically suggested visualizations by encoding the respective relevance into shape and size of their view. By providing a smart spatial arrangement and integration, the image becomes the menu itself. Such a system offers a guided medical diagnosis by domain experts. After presenting the approach in a general setting, we describe an application scenario for diagnostic vascular visualization techniques. Since vascular structures usually consist of many vessels, we describe an anatomical layout for the investigation of the peripheral vasculature of the human lower extremities. By aggregating the volumetric information around the vessel centerlines in a circular fashion, we provide only a single static image for the assessment of the vessels. We call this method Curvicircular Feature Aggregation (CFA). In addition, we describe a stability analysis on the local deviations of the centerlines of vessels to determine potentially imprecise definitions. By conveying this information in the visualization, a fast visual analysis of the centerline stability is feasible. ", month = nov, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/mistelbauer_2013_SIV/", } @bachelorsthesis{mlinaric-2013-volsurf, title = "Direct Volume Rendering for Polygon Models Embedded into Volumetric CT Data", author = "Marko Mlinaric", year = "2013", abstract = "For a better visualization and understanding of volumetric data, combinations of surface and volume rendering can be used. Since the rendering techniques used for polygon surfaces and volumes are different, the combination is not trivial. The presented methodology enhances the state-of-the-art techniques Maximum Intensity Projection (MIP), Direct Volume Rendering (DVR) and Maximum Intensity Difference Accumulation (MIDA) with methods to render surfaces, by buffering the positions, normals and colors of the polygon models, and utilizing them in the ray marching process. The methods have been implemented in the AngioVis Toolbox as a plugin.", month = nov, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/mlinaric-2013-volsurf/", } @mastersthesis{Dogangonul-2013-sparkGL, title = "Shading Framework for Modern Rendering Engines", author = "Onur Dogang\"{o}n\"{u}l", year = "2013", abstract = "In real-time computer graphics, algorithms are often implemented using shaders. Even simple effects possibly comprise several shader files. 
Advanced material-light interactions are often accompanied by level-of-detail and occlusion optimizations; therefore, graphics developers have to cope with various shader files, often including duplicate shader code. Furthermore, the algorithms are forced to be divided into per-stage procedures, which also contributes to the complexity of shader programming. Based on existing research, a framework in OpenGL should be implemented to facilitate shader development. Furthermore, basic means for outputting intermediate computation results should be provided to enhance debugging capabilities.", month = nov, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "OpenGL, Shading Language, Framework, Spark, DirectX", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Dogangonul-2013-sparkGL/", } @misc{Viola_2013_IDV, title = "Importance Driven Visualization of Molecular Surfaces", author = "Julius Parulek and Timo Ropinski and Ivan Viola", year = "2013", abstract = "We present a novel rendering method, based on the level-of-detail concept, which shows protein complexes over time in real-time. We exploit three different molecular surface models, solvent excluded surface (SES), Gaussian kernels and van der Waals spheres combined in one seamless visualization. As a general rule, closest to the viewer we aim at providing a maximum of relevant information related to the structure and binding sites. Such information is conveyed by the SES representation. Farther away from the viewer, we are smoothly changing the visual representation to an approximation of SES through Gaussian kernels. The least detailed representation is based on simple sphere splatting and is dedicated to structures farthest away from the viewer. A more general solution leads us to the definition of a 3D importance function that is based on the distance measure from a molecular feature. In order to preserve smoothness in transition areas, we introduce three shading levels that correspond to their geometric counterparts and a method for creating seamless transition between these representations. The SES representation with full shading and added contours stands in focus while on the other side a sphere representation with constant shading and without contours provide the context. Moreover, we introduce a methodology to render the entire molecule directly using the A-buffer technique, which further improves the performance. The rendering performance is evaluated on series of molecules of varying atom counts.", month = oct, publisher = "BioVis website (http://www.biovis.net/year/2013/posters)", location = "Atlanta, GA", event = "3rd IEEE Symposium on Biological Data Visualization", Conference date = "Poster presented at 3rd IEEE Symposium on Biological Data Visualization (2013-10-13--2013-10-14)", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Viola_2013_IDV/", } @misc{diehl-2013-VTA, title = "Visual Trend Analysis in Weather Forecast", author = "Alexandra Diehl and Stefan Bruckner and Eduard Gr\"{o}ller and Claudio Delrieux and Celeste Saulo", year = "2013", abstract = "Weather conditions affect multiple aspects of human life such as economy, safety, security, and social activities. Weather forecast significantly influences decision and policy making, construction planning, productivity, and environmental risk management. 
Visualization of weather conditions and trends assists the anticipation of unexpected meteorological events and thus helps with appropriate actions and mitigation systems to minimize their impact on human life and activities. In this work, we propose an interactive approach for visual analysis of weather trends and forecast errors in short-term weather forecast simulations. Our solution consists of a multi-aspect system that provides different methods to visualize and analyze multiple runs, time-dependent data, and forecast errors. A key contribution of this work is the comparative visualization technique that allows users to analyze possible weather trends and patterns. We illustrate the usage of our approach with a case study designed and validated in conjunction with domain experts.", month = oct, location = "Atlanta, Georgia, USA", event = "IEEE VIS 2013 Conference", Conference date = "Poster presented at IEEE VIS 2013 Conference (2013-10-13--2013-10-18)", keywords = "Interactive Visual Analysis, Comparative Visualization, Weather Forecast Research", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/diehl-2013-VTA/", } @inproceedings{birsak-2013-sta, title = "Seamless Texturing of Archaeological Data", author = "Michael Birsak and Przemyslaw Musialski and Murat Arikan and Michael Wimmer", year = "2013", abstract = "In this paper we propose a framework for out-of-core real-time rendering of high-quality textured archaeological data-sets. Our input is a triangle mesh and a set of calibrated and registered photographs. Our system performs the actual mapping of the photos to the mesh for high-quality reconstructions, which is a task referred to as the labeling problem. Another problem with such mappings is seams that arise at junctions between triangles that contain information from different photos. These are approached with blending methods, referred to as leveling. We address both problems and introduce a novel labeling approach based on occlusion detection using depth maps that prevents texturing of parts of the model with images that do not contain the expected region. Moreover, we propose an improved approach for seam-leveling that penalizes too large values and helps to keep the resulting colors in a valid range. For high-performance visualization of the 3D models with a huge amount of textures, we make use of virtual texturing, and present an application that generates the needed texture atlas in significantly less time than existing scripts. 
Finally, we show how the mentioned components are integrated into a visualization application for digitized archaeological sites.", month = oct, isbn = "978-1-4799-3168-2 ", publisher = "IEEE", note = "DOI: 10.1109/DigitalHeritage.2013.6743749", location = "Marseille, France", booktitle = "Digital Heritage International Congress (DigitalHeritage), 2013", pages = "265--272 ", keywords = "digital cultural heritage, out-of-core real-time rendering, seamless texturing, virtual texturing", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/birsak-2013-sta/", } @WorkshopTalk{oeltze-2013-tut, title = "IEEE VIS Tutorial on Interactive Visual Analysis of Scientific Data", author = "Steffen Oeltze and Helwig Hauser and Johannes Kehrer", year = "2013", abstract = "In a growing number of application areas, a subject or phenomenon is investigated by means of multiple datasets being acquired over time (spatiotemporal), comprising several attributes per data point (multi-variate), stemming from different data sources (multi-modal) or multiple simulation runs (multi-run/ensemble). Interactive visual analysis (IVA) comprises concepts and techniques for a user-guided knowledge discovery in such complex data. Through a tight feedback loop of computation, visualization and user interaction, it provides new insight into the data and serves as a vehicle for hypotheses generation or validation. It is often implemented via a multiple coordinated view framework where each view is equipped with interactive drill-down operations for focusing on data features. Two classes of views are integrated: physical views, such as direct volume rendering, show information in the context of the spatiotemporal observation space while attribute views, such as scatter plots and parallel coordinates, show relationships between multiple data attributes. The user may drill-down the data by selecting interesting regions of the observation space or attribute ranges leading to a consistent highlighting of this selection in all other views (brushing-and-linking). Three patterns of explorative/analytical procedures may be accomplished by doing so. In a feature localization, the user searches for places in the 3D/4D observation space where certain attribute values are present. In a multi-variate analysis, relations between data attributes are investigated, e.g., by searching for correlations. In a local investigation, the user inspects the values of selected attributes with respect to certain spatiotemporal subsets of the observation space. In this tutorial, we discuss examples for successful applications of IVA to scientific data from various fields: climate research, medicine, epidemiology, and flow simulation / computation, in particular for automotive engineering. We base our discussions on a theoretical foundation of IVA which helps the tutorial attendees in transferring the subject matter to their own data and application area. In the course of the tutorial, the attendees will become acquainted with techniques from statistics and knowledge discovery, which proved to be particularly useful for a specific IVA application. The tutorial further comprises an overview of off-the-shelf IVA solutions, which may be particularly interesting for visualization practitioners. It is concluded by a summary of the gained knowledge and a discussion of open problems in IVA of scientific data. 
The tutorial slides will be available at: http://tinyurl.com/SciDataIVA13", month = oct, event = "IEEE VisWeek", location = "Atlanta, Georgia, USA", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/oeltze-2013-tut/", } @inproceedings{sorger-2013-neuromap, title = "neuroMAP - Interactive Graph-Visualization of the Fruit Fly's Neural Circuit", author = "Johannes Sorger and Katja B\"{u}hler and Florian Schulze and Tianxiao Liu and Barry Dickson", year = "2013", abstract = "Neuroscientists study the function of neural circuits in the brain of the common fruit fly Drosophila melanogaster to discover how complex behavior is generated. To establish models of neural information processing, knowledge about potential connections between individual neurons is required. Connections can occur when the arborizations of two neurons overlap. Judging connectivity by analyzing overlaps using traditional volumetric visualization is difficult since the examined objects occlude each other. A more abstract form of representation is therefore desirable. In collaboration with a group of neuroscientists, we designed and implemented neuroMap, an interactive two-dimensional graph that renders the brain and its interconnections in the form of a circuit-style wiring diagram. neuroMap provides a clearly structured overview of all possible connections between neurons and offers means for interactive exploration of the underlying neuronal database. In this paper, we discuss the design decisions that formed neuroMap and evaluate its application in discussions with the scientists.", month = oct, publisher = "IEEE", location = "Atlanta", booktitle = "Biological Data Visualization (BioVis), 2013 IEEE Symposium on ", pages = "73--80", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/sorger-2013-neuromap/", } @article{ohrhallinger_stefan-2013-smi, title = "Minimizing Edge Length to Connect Sparsely Sampled Unorganized Point Sets", author = "Stefan Ohrhallinger and Sudhir Mudur and Michael Wimmer", year = "2013", abstract = "Most methods for interpolating unstructured point clouds handle densely sampled point sets quite well but get into trouble when the point set contains regions with much sparser sampling, a situation often encountered in practice. In this paper, we present a new method that provides a better interpolation of sparsely sampled features. We pose the surface construction problem as finding the triangle mesh which minimizes the sum of all triangles’ longest edge. The output is a closed manifold triangulated surface Bmin. Exact computation of Bmin for sparse sampling is most probably NP-hard, and therefore we introduce suitable heuristics for its computing. The algorithm first connects the points by triangles chosen in order of their longest edge and with the requirement that all edges must have at least 2 incident triangles. This yields a closed non-manifold shape which we call the Boundary Complex. Then we transform it into a manifold triangulation using topological operations. 
We show that in practice, runtime is linear to that of the Delaunay triangulation of the points.", month = oct, journal = "Computers & Graphics (Proceedings of Shape Modeling International 2013)", volume = "37", number = "6", issn = "0097-8493", pages = "645--658", keywords = "point cloud, reconstruction", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/ohrhallinger_stefan-2013-smi/", } @mastersthesis{fink-2013-gvp, title = "GPU-based Video Processing in the Context of TV Broadcasting", author = "Heinrich Fink", year = "2013", abstract = "This thesis investigates GPU-based video processing in the context of a graphics system for live TV broadcasting. Upcoming TV standards like UHD-1 result in much higher data rates than existing formats. Processing such data rates while satisfying the real-time requirement of live TV poses a particular challenge for the implementation of a software-based broadcast graphics system. In order to reach the required data rates, the software needs to process image data concurrently on the central processing unit (CPU) and graphics processing unit (GPU) of the machine. In particular, the transfers of image data between main and graphics memory need to be overlapped with CPU-based and GPU-based executions in order to maximize data throughput. In this thesis, we therefore investigate the following questions: Which methods are available to a software implementation in order to reach this level of parallelism? Which data rates can actually be reached using these methods? In order to answer these questions, we implement a prototype of a software for rendering TV graphics. To take advantage of the GPU’s ability to efficiently process image data, we use the OpenGL application programming interface (API). We use advanced methods of OpenGL programming to render high-quality video and increase the level of employed parallelism of the GPU. We implement the transcoding between RGB and the professional video format V210, which is more complex to process than conventional consumer-oriented image formats. In our software, we apply the pipeline programming pattern in order to distribute stages of the video processing algorithm to different threads. As a result, those stages execute concurrently on different hardware units of the system. Our prototype exposes the applied degree of concurrency to the user as a collection of different optimization settings. In order to evaluate these optimizations, we integrate a profiling mechanism directly into the execution of the pipeline. This allows us to automatically create performance profiles while running our prototype with various test scenarios. The results of this thesis are based on the analysis of these traces. Our prototype shows that the methods described in this thesis enable a software program to process high-resolution video in high quality. The results of our evaluations also show that there is no single best optimization setting for every GPU architecture. Different driver implementations and hardware features require our prototype to apply different optimization settings for each device. The ability of our software structure to dynamically change the degree of concurrency is therefore an important feature. For broadcasting software that is expected to perform well on a range of hardware devices, this is ultimately an essential feature. 
", month = sep, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "transfer, rendering, video processing, concurrency, OpenGL", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/fink-2013-gvp/", } @bachelorsthesis{sperl-2013-BA, title = "Procedural Textures for Architectural Models ", author = "Georg Sperl", year = "2013", abstract = "Texturing plays an extremely important role in computer graphics. They may for example give flat objects colour patterns and depth or roughness. However it is often difficult to find or create fitting textures for a geometry or surface. Creating textures by hand can prove extremely difficult as problems with seams or distortion may occur, which make the texture look unrealistic. In order to counter these problems many different kinds of texture synthesis have been developed among which procedural textures hold an important position. The types of texture synthesis methods vary from example-based synthesis to procedural noise, cellular textures and more, which can produce a wide array of different results. The aim of this paper is to introduce the reader into related topics of texture synthesis and to introduce a method for generating cellular brick pattern textures that represent brick bonds that are used in real architecture taking correct handling of wall and window borders into consideration. The method uses few parameters to control the synthesis process. In addition a method for generating random stone patterns is introduced. ", month = sep, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "brick texutre synthesis, cellular textures, texture synthesis, stone texture synthesis", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/sperl-2013-BA/", } @misc{Auzinger_2013_SAR, title = "Sampled and Analytic Rasterization", author = "Thomas Auzinger and Michael Wimmer", year = "2013", abstract = "In this poster we present an overview of exact anti-aliasing (AA) methods in rasterization. In contrast to the common supersampling approaches for visibility AA (e.g. MSAA) or both visibility and shading AA (e.g. SSAA, decoupled sampling), prefiltering provides the mathematically exact solution to the aliasing problem. Instead of averaging a set a supersamples, the input data is convolved with a suitable low-pass filter before sampling is applied. Recent work showed that for both visibility signals and simple shading models, a closed-form solution to the convolution integrals can be found. 
As our main contribution, we present a classification of both sample-based and analytic AA approaches for rasterization and analyse their strengths and weaknesses.", month = sep, series = "VMV ", publisher = "Eurographics Association", location = "Lugano, Switzerland", isbn = "978-3-905674-51-4", event = "VMV 2013", booktitle = "Proceedings of the 18th International Workshop on Vision, Modeling and Visualization", Conference date = "Poster presented at VMV 2013 (2013-09-11--2013-09-13)", pages = "223--224", keywords = "Anti-Aliasing, Rasterization, Sampling, Supersampling, Prefiltering", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Auzinger_2013_SAR/", } @bachelorsthesis{leimer-2013-esopc, title = "External Sorting of Point Clouds", author = "Kurt Leimer", year = "2013", abstract = "In recent years, points have seen increased use as a rendering primitive. This Bachelor Thesis presents and compares a number of sorting algorithms used for sorting points in a preprocessing step. This is done in order to decrease the time needed to create a point cloud model that can be rendered by the GPU in real time. Points are compared either by their position along the longest axis or by Morton order and sorted using heap sort or radix sort. An external merge sort algorithm is used for large datasets that do not completely fit into memory. The speed of the sorting process is furthermore increased by making use of parallel processing.", month = sep, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Point Clouds, Sorting, Out-of-Core algorithms", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/leimer-2013-esopc/", } @inproceedings{Auzinger_2013_NSAA, title = "Non-Sampled Anti-Aliasing", author = "Thomas Auzinger and Przemyslaw Musialski and Reinhold Preiner and Michael Wimmer", year = "2013", abstract = "In this paper we present a parallel method for high-quality edge anti-aliasing. In contrast to traditional graphics hardware methods, which rely on massive oversampling to combat aliasing issues in the rasterization process, we evaluate a closed-form solution of the associated prefilter convolution. This enables the use of a wide range of filter functions with arbitrary kernel sizes, as well as general shading methods such as texture mapping or complex illumination models. Due to the use of analytic solutions, our results are exact in the mathematical sense and provide objective ground-truth for other anti-aliasing methods and enable the rigorous comparison of different models and filters. 
An efficient implementation on general purpose graphics hardware is discussed and several comparisons to existing techniques and of various filter functions are given.", month = sep, isbn = "978-3-905674-51-4", publisher = "Eurographics Association", organization = "Eurographics", location = "Lugano, Switzerland", event = "Vision, Modeling, Visualization (VMV)", editor = "Michael Bronstein and Jean Favre and Kai Hormann", booktitle = "Proceedings of the 18th International Workshop on Vision, Modeling and Visualization (VMV 2013)", pages = "169--176", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Auzinger_2013_NSAA/", } @bachelorsthesis{AndreUrsits_2013_APP, title = "AngioVis Patient Persistency Object Relation Mapping for Large Relational Datasets of Binary Data", author = "Manuel Andre and Georg Ursits", year = "2013", abstract = "A usual workstation in medicine processes a significant number of patients per day. In clinical daily routine, it is critical to retrieve, store and access analysed data at a fast pace. An application in this context needs to be responsive and speed up current processes. Due to the necessity of storing all these data, current development in database technologies provides opportunities to improve their management. In this thesis, we investigate how such technologies can be transferred to a specific application scenario. Furthermore, there are several application parameters, such as load time, response delay and integrity of the stored data, that can be improved to have a positive impact on a medical workflow. Apart from those performance-related parameters, several other factors, such as extendibility, scalability and structure, are taken into consideration as well.", month = sep, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/AndreUrsits_2013_APP/", } @article{musialski-2013-surcgf, title = "A Survey of Urban Reconstruction", author = "Przemyslaw Musialski and Peter Wonka and Daniel G. Aliaga and Michael Wimmer and Luc van Gool and Werner Purgathofer", year = "2013", abstract = "This paper provides a comprehensive overview of urban reconstruction. While there exists a considerable body of literature, this topic is still under very active research. The work reviewed in this survey stems from the following three research communities: computer graphics, computer vision, and photogrammetry and remote sensing. Our goal is to provide a survey that will help researchers to better position their own work in the context of existing solutions, and to help newcomers and practitioners in computer graphics to quickly gain an overview of this vast field. 
Further, we would like to bring the mentioned research communities to even more interdisciplinary work, since the reconstruction problem itself is by far not solved.", month = sep, issn = "1467-8659", journal = "Computer Graphics Forum", number = "6", volume = "32", pages = "146--177", keywords = "facade modeling, state-of-the-art report, multi-view stereo, structure from motion, urban modeling, urban reconstruction, inverse-procedural modeling, facade reconstruction", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/musialski-2013-surcgf/", } @bachelorsthesis{schaukowitsch-2013-fls, title = "Pano.NET - An interactive application for camera calibration, image stitching and projective operations", author = "Florian Schaukowitsch", year = "2013", abstract = "The automatic generation of panoramic image mosaics has evolved into a mainstream consumer application in recent years, thanks to modern algorithms and methods that can generate beautiful-looking, artifact-free panoramas with minimal user interaction. Simple panoramas are only one possible application, other examples such as the scene-aware Google Street View show what is possible using these technologies. Software libraries such as OpenCV allow integration of image stitching and related operations into a multitude of applications with relative ease. In this thesis, we present an interactive application (Pano.NET) that allows the user to experiment with several commonly used methods for camera calibration and image stitching (as implemented in OpenCV), and export related data such as registration information. We also give an overview of the used algorithms. In addition, we implemented a projective drawing system that allows the user to draw lines and polygons onto a panorama and on the views that it consists of. Finally, a method is presented that can generate six-sided cube maps out of a spherical panorama.", month = sep, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "multi-view image processing, panorama imaging, image compositing", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/schaukowitsch-2013-fls/", } @bachelorsthesis{rasch_martina-2013-HDRImage, title = "HDR Image Acquisition for Augmented Reality", author = "Martina Rasch", year = "2013", abstract = "In this thesis I present a method for calculating high dynamic range images in a mixed reality system. Cameras and monitors usually have a lower dynamic range than we encounter in the real world, e.g. the sun. While pictures have a maximal contrast of 1:500, real world scenes often have a contrast of 1:100 000. An image taken of a scene with a higher dynamic range than our camera can capture will have regions that are too bright or too dark. With a higher exposure time more details will be visible in dark regions and with a lower exposure time more details will be visible in bright regions. Since our camera cannot create an image preserving details in both dark and bright regions we have to calculate one using the images our camera can actually produce. The method described in this thesis is based on the work of Debevec and Malik. 
It takes several images taken with different exposure times and combines them into a high dynamic range image, leading to a better viewing experience in RESHADE, the mixed reality framework for which this method was implemented.", month = sep, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "mixed reality, high dynamic range image", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/rasch_martina-2013-HDRImage/", } @mastersthesis{Maricic_2013_VFE, title = "Visual Feature Exploration for ssTEM Image Segmentation", author = "Ivan Maricic", year = "2013", abstract = " In order to (preferably) automatically derive the neuronal structures from brain tissue image stacks, the research field computational neuroanatomy relies on computer assisted techniques such as visualization, machine learning and analysis. The image acquisition is based on the so-called transmission electron microscopy (TEM) that allows resolution that is high enough to identify relevant structures in brain tissue images (less than 5 nm per pixel). In order to get to an image stack (or volume) the tissue samples are sliced (or sectioned) with a diamond knife in slices of 40 nm thickness. This approach is called serial-section transmission electron microscopy (ssTEM). The manual segmentation of these high-resolution, low-contrast and artifact afflicted images would be impracticable alone due to the high resolution of 200,000 images of size 2m x 2m pixel in a cubic centimeter tissue sample. But the automatic segmentation is error-prone due to the small pixel value range (8 bit per pixel) and diverse artifacts resulting from mechanical sectioning of tissue samples. Additionally, the biological samples in general contain densely packed structures which leads to non-uniform background that introduces artifacts as well. Therefore, it is important to quantify, visualize and reproduce the automatic segmentation results interactively with as little user interaction as possible. This thesis is based on the membrane segmentation proposed by Kaynig-Fittkau [2011] which for ssTEM brain tissue images outputs two results: (a) a certainty value per pixel (with regard to the analytical model of the user selection of cell membrane pixels) which states how certain the underlying statistical model is that the pixel belongs to the membrane, and (b) after an optimization step the resulting edges which represent the membrane. In this work we present a visualization-assisted method to explore the parameters of the segmentation. The aim is to interactively mark for the expert user those regions where the segmentation fails in order to structure the post- or re-segmentation or to proof-read the segmentation results. This is achieved by weighting the membrane pixels by the uncertainty values resulting from the segmentation process. We would like to start here and employ user knowledge once more to decide which data and in what form should be introduced to the random forest classifier in order to improve the segmentation results either through segmentation quality or segmentation speed. In this regard we focus our attention especially on the visualizations of the uncertainty, the error and multi-modal data. The interaction techniques are explicitly used in those cases where we expect the highest gain at the end of the exploration. 
We show the effectiveness of the proposed methods using the freely available ssTEM brain tissue dataset of the Drosophila fly. Because we lack expert knowledge in the field of neuroanatomy, we must base our assumptions and methods on the underlying ground-truth segmentations of the Drosophila fly brain tissue dataset. ", month = aug, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Maricic_2013_VFE/", } @article{Viola_Ivan_2013_GS, title = "Geological storytelling", author = "Endre M. Lidal and Mattia Natali and Daniel Patel and Helwig Hauser and Ivan Viola", year = "2013", abstract = "Developing structural geological models from exploratory subsea imaging is a difficult and ill-posed process. The structural geological processes that take place in the subsurface are both complex and time-dependent. We present Geological Storytelling, a novel graphical system for performing rapid and expressive geomodeling. Geologists can convey geological stories that externalize both their model and the reasoning process behind it through our simple, yet expressive sketch-based, flip-over canvases. This rapid modeling interface makes it easy to construct a large variety of geological stories, and our story tree concept facilitates easy management and the exploration of these alternatives. The stories are then animated and the geologists can examine and compare them to identify the most plausible models. Finally, the geological stories can be presented as illustrative animations of automatically synthesized 3D models, which efficiently communicate the complex geological evolution to non-experts and decision makers. Geological storytelling provides a complete pipeline from the ideas and knowledge in the mind of the geologist, through externalized artifacts specialized for discussion and knowledge dissemination among peer-experts, to automatically rendered illustrative 3D animations for communication to a lay audience. We have developed geological storytelling in collaboration with domain experts who work with the modeling challenges on a daily basis. For evaluation, we have developed a geological storytelling prototype and presented it to experts and academics from the geosciences. In their feedback, they acknowledge that the rapid and expressive sketching of stories can make them explore more alternatives and that the 3D illustrative animations assist in communicating their models.", month = aug, issn = "0097-8493", journal = "Computers & Graphics", number = "5", volume = "37", pages = "445--459", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Viola_Ivan_2013_GS/", } @book{Sturn-2013-ST, title = "Sturning Technology - Real-Time Rendering of Non Photorealistic 3d Worlds", author = "Tobias Sturn", year = "2013", abstract = "We introduce a new technology, “Sturning Technology”, for artistic, non-photorealistic, emotional real-time rendering of 3D scenes and for blending between the different emotional renderings to show the current emotional state of the viewer of the scene. European art history, with Impressionism, Expressionism and Romanticism, is taken as the reference for creating these emotional renderings, because the painters of these eras wanted to evoke nothing but pure emotions in just one single “frame”.
This technology can be used for all kinds of interactive applications but mainly for games in which the player naturally undergoes many different sensations. We believe that emotional renderings can help a lot to create a much deeper emotional gaming experience where the graphics are directly linked to the emotional state of the player.", month = jul, isbn = "978-3-639-47141-0", pages = "104", publisher = "AV Akademikerverlag", keywords = "Computer Graphics, Emotional Rendering, Non Photorealistic Rendering, Real-Time Rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Sturn-2013-ST/", } @inproceedings{STURN-2013-LSI, title = "Landspotting: A Serious iPad Game for Improving Global Land Cover", author = "Tobias Sturn and Dietmar Pangerl and Linda See and Steffen Fritz and Michael Wimmer", year = "2013", abstract = "Current satellite-derived land cover products, which are very important for answering many crucial research and policy-related questions, show huge disagreements. In this paper we present a serious game for the iPad with the purpose of improving global land cover data. We describe the game, discuss the design decisions made and outline the challenges faced while developing the game. We evaluate how well the players are able to annotate land cover by comparing the game against expert validations collected using the Geo-Wiki tool and provide evidence that games can be a useful way to increase the quality of global land cover.", month = jul, isbn = "978-3-87907-532-4", publisher = "Verlag der \"{O}sterreichischen Akademie der Wissenschaften Austrian Academy of Sciences Press ", organization = "Z_GIS - Department of Geoinformatics", location = "University of Salzburg", booktitle = "Proceedings of the GI-Forum 2013 -- Creating the GISociety", pages = "81--90", keywords = "Landspotting, Serious Game, Improving Global Land Cover", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/STURN-2013-LSI/", } @bachelorsthesis{kaserer-2013-webdicom, title = "DICOM Web Viewer", author = "Michael Kaserer", year = "2013", abstract = "The aim of this thesis is to introduce a web-based and open source DICOM viewer. Web technologies have already been successfully used in medical informatics. Since software used for medical applications is commonly proprietary and mostly restricted to certain operation systems, the combination of web technologies together with the DICOM standard is a natural next step. For this reason, this Bachelor's Thesis attempts to lay the basis for such an open source project. Digital Imaging and Communication in Medicine (DICOM) is an established and nonproprietary standard for the storage and the exchange of information in medical imaging. DICOM defines not only a data format, but it specifies also a network protocol based on the ISO/OSI-model. This work provides first an overview of the DICOM's history and describes its main concepts. Health Level 7 (HL7) is an industry standard for the data exchange between different hospital information systems. Integrating the Healthcare Enterprise (IHE) is an initiative that aims to harmonize the exchange of data between information systems in health by promoting common standards such as DICOM and HL7. Based on a clinical workflow, the interaction of DICOM and HL7 by using IHE is shown. The DICOM Web Viewer was designed as a "Rich Internet Application" and developed in JavaScript, HTML5 and CSS3. 
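Purely as an editorial illustration of the DICOM Part 10 file layout such a viewer has to handle (a 128-byte preamble followed by the ASCII magic word DICM), and not code from the thesis itself (which is implemented in JavaScript), a minimal check might look like this in Python:

    def looks_like_dicom_part10(path):
        # DICOM Part 10 files start with a 128-byte preamble and the magic 'DICM';
        # the data elements (tag, value representation, length, value) follow.
        with open(path, "rb") as f:
            preamble = f.read(128)
            magic = f.read(4)
        return len(preamble) == 128 and magic == b"DICM"

A viewer then decodes the data elements one by one to reach the pixel data it renders.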
To create an easy-to-use user interface, state-of-the-art DICOM viewers were investigated and a common set of features was identified. DICOM files are parsed directly in the browser using JavaScript and then visualized using the HTML5 element. In the appendix, the source code is described in detail and all third-party libraries are listed. The implementation shows that web technologies are well suited to view DICOM images in a web browser. Its cross-platform usability in particular makes the DICOM Web Viewer promising for the future, and it may replace traditional proprietary software.", month = jul, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/kaserer-2013-webdicom/", } @misc{Auzinger_2013_AnaRaster, title = "Analytic Rasterization on GPGPUs", author = "Thomas Auzinger", year = "2013", abstract = "In this poster we present an overview of our work on analytic anti-aliased rasterization. In contrast to the traditional supersampling approach, we use exact prefiltering to remove frequencies above the Nyquist limit. This is enabled by performing exact hidden surface elimination to compute the visible regions of all scene primitives. We derived a closed-form solution to the subsequent prefilter convolution which guarantees high-quality anti-aliasing up to numerical precision. Our analytic rasterization pipeline is based on several highly parallel algorithms, and an efficient implementation in CUDA is presented as well as several result images of its performance on complex scenes. ", month = jul, note = "Best Poster Award", location = "Barcelona", event = "PUMPS Summer School", Conference date = "Poster presented at PUMPS Summer School (2013-07-08--2013-07-12)", keywords = "hidden surface elimination, vector format, analytic, rasterization, CUDA, GPGPU, prefiltering, anti-aliasing, parallel, aliasing, closed-form, visibility, convolution, filter", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Auzinger_2013_AnaRaster/", } @mastersthesis{Parzer_2013_IIG, title = "Irrational Image Generator", author = "Simon Parzer", year = "2013", abstract = "An approach called Inductive Rotation (IR), developed by artist Hofstetter Kurt, can be used to create intricate patterns that fill the 2D plane from a single prototile by repeated translation and rotation. These patterns are seemingly nonperiodic and have interesting features, both from a mathematical and an artistic viewpoint. The IR method has not yet been described in scientific literature. It is related to and has been inspired by aperiodic tilings like the well-known Penrose tilings. During the course of this thesis, some research on the patterns generated by Inductive Rotation has been done and algorithms that allow for automatic generation of these patterns have been developed. The implementation is called the Irrational Image Generator, a tool that on the one hand is a reference implementation of the IR method, and on the other hand can be used by the artist for further experimentation to fully utilize the artistic possibilities of the IR approach. The Irrational Image Generator is preceded by a series of prototypes that were developed to get a better grasp of the expected results and performance of the tool. Each prototype, as well as the final implementation, was tested by Hofstetter Kurt.
This iterative development process has led to two different implementation approaches that both have their advantages and disadvantages. For this reason, both methods have been considered in the final implementation. Generation algorithms that operate on geometry instead of directly manipulating bitmap data have been developed. The program makes use of the GPU through OpenGL to render the resulting patterns through textured polygons. It turns out that run-time and memory usage of the IR algorithm grow exponentially with the number of iterations. This means that iteration numbers are limited, although the tool’s performance is sufficient for artistic purposes.", month = jun, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Parzer_2013_IIG/", } @inproceedings{JAHRMANN-2013-IGR, title = "Interactive Grass Rendering Using Real-Time Tessellation", author = "Klemens Jahrmann and Michael Wimmer", year = "2013", abstract = "Grass rendering is needed for many outdoor scenes, but for real-time applications, rendering each blade of grass as geometry has been too expensive so far. This is why grass is most often drawn as a texture mapped onto the ground or grass patches rendered as transparent billboard quads. Recent approaches use geometry for blades that are near the camera and flat geometry for rendering further away. In this paper, we present a technique which is capable of rendering whole grass fields in real time as geometry by exploiting the capabilities of the tessellation shader. Each single blade of grass is rendered as a two-dimensional tessellated quad facing its own random direction. This enables each blade of grass to be influenced by wind and to interact with its environment. In order to adapt the grass field to the current scene, special textures are developed which encode on the one hand the density and height of the grass and on the other hand its look and composition.", month = jun, isbn = "978-80-86943-74-9", location = "Plzen, CZ", editor = "Manuel Oliveira and Vaclav Skala", booktitle = "WSCG 2013 Full Paper Proceedings", pages = "114--122", keywords = "grass rendering, real-time rendering, billboards", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/JAHRMANN-2013-IGR/", } @article{Viola_Ivan_2013_RMA, title = "Rule-based method for automatic scaffold assembly from 3D building models", author = "Tyge Løvset and Dag Magne Ulvang and Tor Christian Bekkvik and K{\aa}re Villanger and Ivan Viola", year = "2013", abstract = "To manually specify an optimal scaffold assembly for a given building geometry is a time consuming task. Our goal is to automate the process of selecting and placing scaffold components in order to design an optimal scaffold assembly for a specific building. The resulting assembly must be possible to construct in practice, should be practical to use for the workers, must satisfy governmental rules and regulations and should ideally result in minimum accumulated component cost. We propose a novel procedural modeling pipeline based on an input house model. First we extract vital coordinates from the house model that define the 3D scaffold placement. These coordinates are the basis for defining the positioning of scaffold cells. In the next step we populate the cells with actual scaffold components geometry. The resulting model is visualized to assist the assembly process. 
Additionally, it is decomposed into elementary building blocks to produce assembly component lists, estimate the scaffold cost, and compute the weight for transportation and packing of components from a warehouse. The result from the automated process is compared to a scaffold design produced manually by a professional scaffold designer.", month = jun, issn = "0097-8493", journal = "Computers & Graphics", number = "4", volume = "37", pages = "256--268", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Viola_Ivan_2013_RMA/", } @inproceedings{Viola_Ivan_2013_RSb, title = "Rapid Sketch-based 3D Modeling of Geology", author = "Endre M. Lidal and Morten Bendiksen and Daniel Patel and Ivan Viola", year = "2013", abstract = "We present and compare two different approaches for performing rapid 3D geological modeling. The ad-hoc approach is based on a composition of many specialized modeling functions, while the generic approach provides one powerful, generic modeling function. Our experiences after developing these two approaches are that the solution space of 3D geological modeling is more extensive than we initially expected and most likely larger than for other modeling domains such as architecture. Further, more research is needed to investigate whether it is possible to find one well-defined toolset of sketching metaphors that is able to cover all of geological modeling.", month = jun, publisher = "Springer", organization = "Euro Vis 2013", note = "Workshop on Visualisation in Environmental Sciences (EnvirVis) (2013)", location = "Leipzig, Germany", booktitle = "Proceedings of EnvirVis Short Papers 2013, 2013", pages = "1--5", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Viola_Ivan_2013_RSb/", } @article{mistelbauer-2013-cfa, title = "Vessel Visualization using Curvicircular Feature Aggregation", author = "Gabriel Mistelbauer and Anca Morar and Andrej Varchola and R\"{u}diger Schernthaner and Ivan Baclija and Arnold K\"{o}chl and Armin Kanitsar and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2013", abstract = "Radiological investigations are common medical practice for the diagnosis of peripheral vascular diseases. Existing visualization methods such as Curved Planar Reformation (CPR) depict calcifications on vessel walls to determine if blood is still able to flow. While it is possible with conventional CPR methods to examine the whole vessel lumen by rotating around the centerline of a vessel, we propose Curvicircular Feature Aggregation (CFA), which aggregates these rotated images into a single view. By eliminating the need for rotation, vessels can be investigated by inspecting only one image. This method can be used as a guidance and visual analysis tool for treatment planning. We present applications of this technique in the medical domain and report feedback from radiologists.", month = jun, journal = "Computer Graphics Forum", volume = "32", number = "3", pages = "231--240", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/mistelbauer-2013-cfa/", } @inproceedings{scheiblauer-2013-wscg, title = "Analysis of Interactive Editing Operations for Out-of-Core Point-Cloud Hierarchies", author = "Claus Scheiblauer and Michael Wimmer", year = "2013", abstract = "In this paper we compare the time and space complexity of editing operations on two data structures which are suitable for visualizing huge point clouds.
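Relating to the Curvicircular Feature Aggregation entry above, and purely as an editorial sketch (the paper's actual aggregation operator is not stated in the abstract), collapsing the rotated CPR images into one view could look like this, assuming the rotated CPR stack is already given:

    import numpy as np

    def curvicircular_aggregate(rotated_cprs, reduce=np.max):
        # rotated_cprs: array of shape (n_angles, n_centerline_samples, width),
        # one curved planar reformation per rotation angle around the centerline.
        # Collapsing the angle axis yields a single image; maximum intensity is
        # one plausible choice of aggregate, mean or other operators also work.
        return reduce(rotated_cprs, axis=0)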
The first data structure was introduced by Scheiblauer and Wimmer [SW11] and uses only the original points from a source data set for building a level-of-detail hierarchy that can be used for rendering point clouds. The second data structure, introduced by Wand et al. [WBB+07], requires additional points for the level-of-detail hierarchy and therefore needs more memory when stored on disk. Both data structures are based on an octree hierarchy and allow for deleting and inserting points. Besides analyzing and comparing these two data structures, we also introduce an improvement to the point-deletion algorithm for the data structure of Wand et al. [WBB+07], which thus allows for a more efficient node loading strategy during rendering.", month = jun, isbn = "978-80-86943-74-9", publisher = "Union Agency", location = "Plzen", editor = "Vaclav Skala", booktitle = "WSCG 2013 Full Paper Proceedings", pages = "123--132", keywords = "complexity analysis, point clouds, data structures, viewing algorithms", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/scheiblauer-2013-wscg/", } @article{karimov-2013-vivisection, title = "ViviSection: Skeleton-based Volume Editing", author = "Alexey Karimov and Gabriel Mistelbauer and Johanna Schmidt and Peter Mindek and Elisabeth Schmidt and Timur Sharipov and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2013", abstract = "Volume segmentation is important in many applications, particularly in the medical domain. Most segmentation techniques, however, work fully automatically only in very restricted scenarios, and cumbersome manual editing of the results is a common task. In this paper, we introduce a novel approach for the editing of segmentation results. Our method exploits structural features of the segmented object to enable intuitive and robust correction and verification. We demonstrate that our new approach can significantly increase the segmentation quality even in difficult cases such as in the presence of severe pathologies.", month = jun, journal = "Computer Graphics Forum", volume = "32", number = "3", pages = "461--470", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/karimov-2013-vivisection/", } @inproceedings{STURN-2013-LGI, title = "Landspotting - Games for Improving Global Land Cover", author = "Tobias Sturn and Michael Wimmer and Peter Purgathofer and Steffen Fritz", year = "2013", abstract = "Current satellite-derived land cover products, which are very important for answering many crucial questions, show huge disagreements. In this paper, we introduce four serious game prototypes - a Facebook strategy game played on Google Maps, a Facebook tagging game, a tower-defense game, and an aesthetic tile game for the iPad - with the purpose of improving global land cover data. We describe the games in detail and discuss the design decisions we made and challenges we faced while developing the games. We evaluate how much the players have already been able to improve global land cover data and provide evidence that games can be a useful way to increase the quality of this data.
Finally, we discuss how the main game is perceived by the players and what has to be further improved to attract a bigger audience.", month = may, location = "Chania, Greece", booktitle = "Proceedings of Foundations of Digital Games Conference 2013 (FDG 2013)", pages = "117--125", keywords = "Improving Global Land Cover, Serious Games, Landspotting", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/STURN-2013-LGI/", } @article{borgo-2013-gly, title = "Glyph-based Visualization: Foundations, Design Guidelines, Techniques and Applications", author = "Rita Borgo and Johannes Kehrer and David H.S. Chung and Eamonn Maguire and Robert S. Laramee and Helwig Hauser and Matthew Ward and Min Chen", year = "2013", abstract = "This state-of-the-art report focuses on glyph-based visualization, a common form of visual design where a data set is depicted by a collection of visual objects referred to as glyphs. Its major strength is that patterns of multivariate data involving more than two attribute dimensions can often be more readily perceived in the context of a spatial relationship, whereas many techniques for spatial data, such as direct volume rendering, find it difficult to depict multivariate or multi-field data, and many techniques for non-spatial data, such as parallel coordinates, are less able to convey spatial relationships encoded in the data. This report fills several major gaps in the literature, drawing the link between the fundamental concepts in semiotics and the broad spectrum of glyph-based visualization, reviewing existing design guidelines and implementation techniques, and surveying the use of glyph-based visualization in many applications.", month = may, journal = "Eurographics State of the Art Reports", note = "http://diglib.eg.org/EG/DL/conf/EG2013/stars/039-063.pdf", publisher = "Eurographics Association", series = "EG STARs", pages = "39--63", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/borgo-2013-gly/", } @inproceedings{mindek-2013-csl, title = "Contextual Snapshots: Enriched Visualization with Interactive Spatial Annotations", author = "Peter Mindek and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2013", abstract = "Spatial selections are a ubiquitous concept in visualization. By localizing particular features, they can be analyzed and compared in different views. However, the semantics of such selections are often dependent on other parameter settings and it can be difficult to reconstruct them without additional information. In this paper, we present the concept of contextual snapshots as an effective means for managing spatial selections in visualized data. The selections are automatically associated with the context in which they have been created. Contextual snapshots can also be used as the basis for interactive integrated and linked views, which enable in-place investigation and comparison of multiple visual representations of data. Our approach is implemented as a flexible toolkit with well-defined interfaces for integration into existing systems.
We demonstrate the power and generality of our techniques by applying them to several distinct scenarios such as the visualization of simulation data and the analysis of historical documents.", month = may, series = "SCCG ", location = "Smolenice, Slovakia", booktitle = "Proceedings of the 29th Spring Conference on Computer Graphics", keywords = "spatial selections, annotations, linked views, provenance", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/mindek-2013-csl/", } @bachelorsthesis{ADORJAN-2013-ASE, title = "Advanced Shadow Algorithms - Filtered Hard Shadows", author = "Matthias Adorjan", year = "2013", abstract = "Shadows play a very important role in enhancing the realism of rendered scenes. Scenes without them look unnatural and flat, and viewers feel that something is missing. Furthermore, it is hard to determine the spatial relationship between the objects in a scene without shadows. In recent years, many papers and articles have been published on the topic of rendering realistic shadows in real time. Many of the presented techniques are based on shadow mapping, which has become widely accepted as a method for shadow rendering. This thesis focuses on giving an overview of commonly used methods to fight aliasing and produce soft-edged shadows in real time by filtering hard shadows created on the basis of shadow mapping. These filtering techniques are integrated into an existing framework, which allows the user to modify different essential parameters to find the best solution regarding shadow creation for a particular scene. Additionally, gamma correction is explained as another mechanism for improving realism and image quality. We present a tutorial on what to consider when implementing a gamma-correct rendering pipeline in DirectX 11. The resulting software of this thesis is freely available at http://www.realtimeshadows.com.", month = may, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/ADORJAN-2013-ASE/", } @article{Reisner_2013_RSB, title = "Reconstruction of Shape Boundaries with Multimodal Constraints", author = "Irene Reisner-Kollmann and Stefan Maierhofer and Werner Purgathofer", year = "2013", abstract = "Shape primitives are a valuable input for reconstructing 3D models from point clouds. In this paper we present a method for clipping simple shape primitives at reasonable boundaries. The shape primitives, e.g. planes or cylinders, are 2D manifolds which are automatically detected in unstructured point clouds. Shape boundaries are necessary for generating valid 3D models from multiple shape primitives, because shape primitives possibly have dimensions of infinite extent or are only partially present in the scene. Hints for reasonable boundaries of shape primitives are indicated by different input sources and constraints. Point clouds and range images provide information on where shape primitives coincide with measured surface points. Edge detectors offer cues for surface boundaries in color images. The set of shape primitives is analyzed for constraints such as intersections. Due to an iterative approach, intermediate results provide additional constraints such as coplanar boundary points over multiple shape primitives. We present a framework for extracting and optimizing shape boundaries based on the given input data and multiple constraints.
Further, we provide a simple user interface for manually adding constraints in order to improve the results. Our approach generates structurally simple 3D models from shape primitives and point clouds. It is useful for reconstructing scenes containing man-made objects, such as buildings, interior scenes, or engineering objects. The application of multiple constraints enables the reconstruction of proper 3D models despite noisy or incomplete point clouds. ", month = may, journal = "Computers & Graphics", number = "3", volume = "37", pages = "137--147", keywords = "Shape boundaries, Shape primitives, Reconstruction", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Reisner_2013_RSB/", } @inproceedings{ilcik-2013-cipmi, title = "Challenges and Ideas in Procedural Modeling of Interiors", author = "Martin Il\v{c}\'{i}k and Michael Wimmer", year = "2013", abstract = "While the creation of convincing cityscapes from the outside is already possible, there is a lack of robust and efficient techniques for modeling the interior of buildings. In particular, we focus on challenges for the subdivision of the interior space into rooms and for the placement of furniture in those rooms.", month = may, isbn = "978-3-905674-46-0", publisher = "Eurographics Association", location = "Girona, Spain", issn = "2307-8251", editor = "Vincent Tourre and Gonzalo Besuievsky", booktitle = "Proceedings of Eurographics Workshop on Urban Data Modelling and Visualisation (UDMV 2013)", pages = "29--30", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/ilcik-2013-cipmi/", } @mastersthesis{Novotny_2013_ASVD, title = "Application of Smart Visibility on Medical 3D Ultrasound Datasets", author = "Johannes Novotny", year = "2013", abstract = "Ultrasonography is one of the standard medical imaging techniques used in pregnancy examinations. It is widely available due to its low cost, portability and simple diagnostic procedure. In recent years, three-dimensional ultrasound (US) imaging has been gaining popularity in the area of prenatal care. It provides examiners with a coherent visualization of the fetus. However, within scanned 3D datasets, the fetus is often surrounded by occluding structures which reduce its visibility in the resulting visualizations. Current ultrasound machines provide several methods to remove these occluders from visualizations. These methods are often difficult to control in the real-time setting of a US examination session. As a result, the workflow of US examinations has to be interrupted in order to apply occlusion removal to selected scans. In an attempt to reduce the required user interaction, this thesis evaluates a recently developed occlusion removal technique. The smart visibility method for prenatal ultrasound analyzes the ray profiles during the execution of a ray-casting algorithm. This analysis identifies anatomic features within a dataset. From these features, a clipping surface that separates the fetus from its occluders is calculated. The clipping surface is used to remove occlusions from the visualization. The use of cutaway and ghosting visualization techniques allows an unoccluded view of the fetus, while retaining its general context within the volume. The clipping surface calculated by the smart visibility method is obtained by using a surface reconstruction algorithm. Within this thesis, different surface reconstruction techniques were evaluated for their occlusion removal quality and performance.
It was possible to show that one of the evaluated approaches provides good results in a majority of test cases. This approach also reduces the required user interaction to a single parameter. To verify these results, the approach has been tested within a state-of-the-art US firmware. By manipulating the adjustable parameter through the hardware controls of an US machine, the smart visibility method can be used in real-time scenarios. GE Healthcare is evaluating the smart visibility method and considers to include it in upcoming versions of their US machine firmwares.", month = may, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Novotny_2013_ASVD/", } @inproceedings{waldner-2013-ubiWM, title = "Towards Ubiquitous Information Space Management", author = "Manuela Waldner and Dieter Schmalstieg", year = "2013", abstract = "Large, high-resolution display spaces are usually created by carefully aligning multiple monitors or projectors to obtain a perfectly flat, rectangular display. In this paper, we suggest the usage of imperfect surfaces as extension of personal workspaces to create ubiquitous, personalized information spaces. We identify five environmental factors ubiquitous information spaces need to consider: 1) user location and display visibility, 2) display gaps and holes, 3) corners and non-planarity of the display surface, 4) physical objects within and around the display surface, and 5) non-rectangular display shapes. Instead of compensating for fragmentations and non-planarity of the information space, we propose a ubiquitous information space manager, adapting interaction and window rendering techniques to the above mentioned factors. We hypothesize that knowledge workers will benefit from such ubiquitous information spaces due to increased exploitation of spatial cognition. ", month = may, isbn = "978-1-4503-1952-2", publisher = "ACM", location = "Paris, France", booktitle = "POWERWALL: International Workshop on Interactive, Ultra-High-Resolution Displays, part of the SIGCHI Conference on Human Factors in Computing Systems (2013)", pages = "1--6", keywords = "information management, ubiquitous displays", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/waldner-2013-ubiWM/", } @article{MATTAUSCH-2013-FSBE, title = "Freeform Shadow Boundary Editing", author = "Oliver Mattausch and Takeo Igarashi and Michael Wimmer", year = "2013", abstract = "We present an algorithm for artistically modifying physically based shadows. With our tool, an artist can directly edit the shadow boundaries in the scene in an intuitive fashion similar to freeform curve editing. Our algorithm then makes these shadow edits consistent with respect to varying light directions and scene configurations, by creating a shadow mesh from the new silhouettes. The shadow mesh helps a modified shadow volume algorithm cast shadows that conform to the artistic shadow boundary edits, while providing plausible interaction with dynamic environments, including animation of both characters and light sources. Our algorithm provides significantly more fine-grained local and direct control than previous artistic light editing methods, which makes it simple to adjust the shadows in a scene to reach a particular effect, or to create interesting shadow shapes and shadow animations. 
All cases are handled with a single intuitive interface, be it soft shadows, or (self-)shadows on arbitrary receivers.", month = may, journal = "Computer Graphics Forum (Proceeding of EUROGRAPHICS 2013)", volume = "32", number = "2", issn = "0167-7055", pages = "175--184", keywords = "shadows, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/MATTAUSCH-2013-FSBE/", } @inproceedings{Viola_Ivan_2013_D3D, title = "Doppler-based 3D Blood Flow Imaging and Visualization", author = "{\AA}smund Birkeland and Dag Magne Ulvang and Kim Nylund and Trygve Hausken and Odd Helge Gilja and Ivan Viola", year = "2013", abstract = "Blood flow is a very important part of human physiology. In this paper, we present a new method for estimating and visualizing 3D blood flow on-the-fly based on Doppler ultrasound. We add semantic information about the geometry of the blood vessels in order to recreate the actual velocities of the blood. Assuming a laminar flow, the flow direction is related to the general direction of the vessel. Based on the center line of the vessel, we create a vector field representing the direction of the vessel at any given point. The actual flow velocity is then estimated from the Doppler ultrasound signal by back-projecting the velocity in the measured direction, onto the vessel direction. Additionally, we estimate the flux at user-selected cross-sections of the vessel by integrating the velocities over the area of the cross-section. In order to visualize the flow and the flux, we propose a visualization design based on traced particles colored by the flux. The velocities are visualized by animating particles in the flow field. Further, we propose a novel particle velocity legend as a means for the user to estimate the numerical value of the current velocity. Finally, we perform an evaluation of the technique where the accuracy of the velocity estimation is measured using a 4D MRI dataset as a basis for the ground truth.", month = may, isbn = "978-80-223-3377-1", publisher = "ACM Publishing House", location = "Smolenice, Slovak Republic", booktitle = "SCCG 2013 - 29th Proceedings Spring conference on Computer Graphics", pages = "128--135", keywords = "Medical Visualization, Biomedical", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Viola_Ivan_2013_D3D/", } @inproceedings{Viola_Ivan_2013_HQ3, title = "High-Quality 3D Visualization of In-Situ Ultrasonography", author = "Ivan Viola and {\AA}smund Birkeland and Veronika Solteszova and Linn Helljesen and Helwig Hauser and Spiros Kotopoulis and Kim Nylund and Dag Magne Ulvang and Ola Kristoffer Øye and Trygve Hausken and Odd Helge Gilja", year = "2013", abstract = "In recent years medical ultrasound has experienced a rapid development in the quality of real-time 3D ultrasound (US) imaging. The image quality of the 3D volume that was previously possible to achieve within the range of a few seconds, is now possible in a fraction of a second. This technological advance offers entirely new opportunities for the use of US in the clinic. 
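Relating to the Doppler-based flow imaging entry above: under its laminar-flow assumption, the measured Doppler velocity $v_D$ is the projection of the true velocity onto the beam direction $\hat{\mathbf{b}}$, so with the vessel tangent $\hat{\mathbf{t}}$ taken from the centerline, the back-projected velocity and the flux through a cross-section $A$ can be restated (an editorial sketch with symbols chosen here, not notation from the paper) as
\[
  \mathbf{v} \;\approx\; \frac{v_D}{\hat{\mathbf{b}}\cdot\hat{\mathbf{t}}}\,\hat{\mathbf{t}},
  \qquad
  Q \;=\; \int_{A} \mathbf{v}\cdot\hat{\mathbf{n}}\,\mathrm{d}A
  \;\approx\; \sum_{k} \mathbf{v}_k\cdot\hat{\mathbf{n}}\,\Delta A ,
\]
which becomes ill-conditioned when the beam is nearly perpendicular to the vessel ($\hat{\mathbf{b}}\cdot\hat{\mathbf{t}}\approx 0$).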
In our project, we investigate how real-time 3D US can be combined with high-performance processing on today’s graphics hardware to allow for high-quality 3D visualization and precise navigation during the examination.", month = may, publisher = "Eurographics", note = "1st Prize - Medical Prize Short Paper", location = "Girona, Spain", booktitle = "EG 2013 - Dirk Bartz Prize", pages = "1--4", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Viola_Ivan_2013_HQ3/", } @inproceedings{Viola_Ivan_2013_SVA, title = "Seamless Visual Abstraction of Molecular Surfaces", author = "Julius Parulek and Timo Ropinski and Ivan Viola", year = "2013", abstract = "Molecular visualization is often challenged with the rendering of large sequences of molecular simulations in real time. We introduce a novel approach that enables us to show even large protein complexes over time in real time. Our method is based on the level-of-detail concept, where we exploit three different molecular surface models: solvent excluded surface (SES), Gaussian kernels and van der Waals spheres, combined in one visualization. We introduce three shading levels that correspond to their geometric counterparts and a method for creating seamless transitions between these representations. The SES representation with full shading and added contours stands in focus, while a sphere representation with constant shading and without contours provides the context. Moreover, we introduce a methodology to render the entire molecule directly using the A-buffer technique, which further improves the performance. The rendering performance is evaluated on a series of molecules of varying atom counts.", month = may, isbn = "978-80-223-3377-1", series = " SCCG '13", publisher = "ACM Publishing House", organization = "Comenius University, Bratislava", location = "Smolenice, Slovak Republic", editor = "Roman Durikovi\v{c}, Holly Rushmeier", booktitle = "SCCG 2013 - 29th Proceedings Spring conference on Computer Graphics", pages = "120--127", keywords = "Implicit Surfaces, Level-of-detail, Visualization of Molecular Surfaces", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Viola_Ivan_2013_SVA/", } @inproceedings{amirkhanov_2013_AMA, title = "Fuzzy CT Metrology: Dimensional Measurements on Uncertain Data", author = "Artem Amirkhanov and Christoph Heinzl and Christoph Kuhn and Johann Kastner and Eduard Gr\"{o}ller", year = "2013", abstract = "Metrology through geometric dimensioning and tolerancing is an important instrument applied for industrial manufacturing and quality control. Typically, tactile or optical coordinate measurement machines (CMMs) are used to perform dimensional measurements. In recent years, industrial 3D X-ray computed tomography (3DXCT) has been increasingly applied for metrology due to the development of XCT systems with higher accuracy and their ability to capture both internal and external structures of a specimen within one scan. Using 3DXCT, the location of the specimen surface is estimated based on the scanned attenuation coefficients. As opposed to tactile or optical measurement techniques, the surface is not explicit and implies a certain positional uncertainty depending on artifacts and noise in the scan data and the surface extraction algorithm used. Moreover, conventional XCT measurement software does not consider uncertainty in the data. In this work we present techniques which account for uncertainty arising in the XCT metrology data flow.
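Relating to the molecular-surfaces entry above: the Gaussian-kernel level of detail it mentions is commonly defined as an isosurface of a sum of atom-centered Gaussians (a generic formulation given here for orientation, not necessarily the paper's exact kernel),
\[
  \rho(\mathbf{x}) \;=\; \sum_{i} \exp\!\left(-\frac{\lVert \mathbf{x}-\mathbf{c}_i\rVert^{2}}{2\sigma_i^{2}}\right),
  \qquad
  S \;=\; \{\,\mathbf{x} \;:\; \rho(\mathbf{x}) = \tau\,\},
\]
where the $\mathbf{c}_i$ are atom centers, the $\sigma_i$ are radius-dependent kernel widths, and $\tau$ is the chosen isovalue; per the abstract, the solvent excluded surface serves as the detailed focus representation and van der Waals spheres as the coarse context.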
Our technique provides the domain experts with uncertainty visualizations, which extend the XCT metrology workflow on different levels. The developed techniques are integrated into a tool utilizing linked views, smart 3D tolerance tagging and plotting functionalities. The presented system is capable of visualizing the uncertainty of measurements on various levels-of-detail. Commonly known geometric tolerance indications are provided as smart tolerance tags. Finally, we incorporate the uncertainty of the data as a context in commonly used measurement plots. The proposed techniques provide an augmented insight into the reliability of geometric tolerances while maintaining the daily workflow of domain specialists, giving the user additional information on the nature of areas with high uncertainty. The presented techniques are evaluated based on domain experts feedback in collaboration with our company partners.", month = may, isbn = "978-80-223-3377-1", publisher = "Comenius university, Bratislava, Slovakia", location = "Smolenice, Slovak Republic", booktitle = "SCCG 2013 - 29th Proceedings Spring conference on Computer Graphics", pages = "93--101", keywords = "metrology, uncertainty visualization, level-of-details, industrial 3D computed tomography", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/amirkhanov_2013_AMA/", } @article{Auzinger_2013_AnaVis, title = "Analytic Visibility on the GPU", author = "Thomas Auzinger and Michael Wimmer and Stefan Jeschke", year = "2013", abstract = "This paper presents a parallel, implementation-friendly analytic visibility method for triangular meshes. Together with an analytic filter convolution, it allows for a fully analytic solution to anti-aliased 3D mesh rendering on parallel hardware. Building on recent works in computational geometry, we present a new edge-triangle intersection algorithm and a novel method to complete the boundaries of all visible triangle regions after a hidden line elimination step. All stages of the method are embarrassingly parallel and easily implementable on parallel hardware. A GPU implementation is discussed and performance characteristics of the method are shown and compared to traditional sampling-based rendering methods.", month = may, journal = "Computer Graphics Forum (Proceeding of EUROGRAPHICS 2013)", volume = "32", number = "2", issn = "1467-8659", pages = "409--418", keywords = "GPU, anti-aliasing, SIMD, filter, rendering, analytic, visibility, close-form, hidden surface elimination, hidden surface removal, GPGPU", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Auzinger_2013_AnaVis/", } @inproceedings{Musialski-2013-ipmum, title = "Inverse-Procedural Methods for Urban Models", author = "Przemyslaw Musialski and Michael Wimmer", year = "2013", abstract = "Procedural modeling is an elegant and fast way to generate huge complex and realistically looking urban sites. Due to its generative nature it can also be referred to as forward-procedural modeling. Its major drawback is the usually quite complicated way of control. To overcome this difficulty a novel modeling paradigm has been introduced: it is commonly referred to as inverse procedural modeling, and its goal is to generate compact procedural descriptions of existing models---in the best case in an automatic manner as possible. These compact procedural representations can be used as a source for the synthesis of identical or similar objects, applied in various simulations and other studies of urban environments. 
We believe that this technology is still a widely unexplored ground and that it will prove itself as a very important tool in the reconstruction process. In this paper we sketch how inverse procedural modeling can be applied in the urban modeling field.", month = may, isbn = "978-3-905674-46-0", publisher = "Eurographics Association", location = "Girona, Spain", issn = "2307-8251", editor = "V. Tourre and G. Besuievsky", booktitle = "Proceedings of Eurographics Workshop on Urban Data Modelling and Visualisation (UDMV 2013)", pages = "31--32", keywords = "inverse procedural modeling, urban modeling, urban reconstruction", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Musialski-2013-ipmum/", } @mastersthesis{MeierStauffer-2013-iihf, title = "Realistic Local Lighting in Dynamic Height Fields", author = "Aaron Meier-Stauffer", year = "2013", abstract = "This thesis presents a method to compute soft shadows from environment maps and local light sources on dynamic height fields, extending the work of Snyder et al. [29]. While direct illumination in static scenes is very common in video games and 3D applications, real-time global illumination methods supporting dynamic scenes and lights are still an active field of research. In this work, a short general introduction to global illumination and spherical harmonics is presented as well as an overview of the state of the art methods in interactive global illumination for height fields. In our method, visibility at each receiver point of a height field is determined by the visible horizon, which can be approximated efficiently using a multi-resolution sampling approach. Local light sources are represented by spherical lights and the incident radiance at receiver points is projected into the spherical harmonic basis. Hence, this method produces convincing shadows on dynamic height fields more efficiently than global illumination methods for general geometry.", month = may, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Spherical Harmonics, Height Fields, Indirect Illumination", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/MeierStauffer-2013-iihf/", } @inproceedings{2013_Viola_Ivan_2013_MTS, title = "Modeling Terrains and Subsurface Geology", author = "Mattia Natali and Endre M. Lidal and Julius Parulek and Ivan Viola and Daniel Patel", year = "2013", abstract = "The process of creating terrain and landscape models is important in a variety of computer graphics and visualization applications, from films and computer games, via flight simulators and landscape planning, to scientific visualization and subsurface modelling. Interestingly, the modelling techniques used in this large range of application areas have started to meet in the last years. In this state-of-the-art report, we present two taxonomies of different modelling methods. Firstly we present a data oriented taxonomy, where we divide modelling into three different scenarios: the data-free, the sparse-data and the dense-data scenario. Then we present a workflow oriented taxonomy, where we divide modelling into the separate stages necessary for creating a geological model. We start the report by showing that the new trends in geological modelling are approaching the modelling methods that have been developed in computer graphics. 
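Relating to the height-field illumination thesis above, a minimal editorial sketch (not the thesis's multi-resolution scheme) of the horizon query it builds on: for a receiver point, the visible horizon in a given direction is the largest elevation angle encountered while marching across the height field.

    import math

    def horizon_angle(height, x0, y0, dx, dy, n_steps, cell_size=1.0):
        # height: 2D list/array of elevations on a regular grid
        # (dx, dy): integer step direction; n_steps: how far to march
        h0 = height[y0][x0]
        best = -math.pi / 2                      # start with an unobstructed view
        for s in range(1, n_steps + 1):
            x, y = x0 + s * dx, y0 + s * dy
            if not (0 <= x < len(height[0]) and 0 <= y < len(height)):
                break                            # left the height field
            dist = s * cell_size * math.hypot(dx, dy)
            best = max(best, math.atan2(height[y][x] - h0, dist))
        return best                              # light arriving below this angle is occluded

The thesis accelerates this kind of visibility query with multi-resolution sampling before projecting the visible incident radiance into the spherical harmonic basis.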
We then give an introduction to the process of geological modelling followed by our two taxonomies with descriptions and comparisons of selected methods. Finally we discuss the challenges and trends in geological modelling.", month = may, publisher = "Eurographics 2013 - State of the Art Reports", organization = "Eurographics", location = "Girona, Spain", booktitle = "INPROCEEDINGS, EuroGraphics 2013 State of the Art Reports (STARs), 2013", pages = "155--173", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/2013_Viola_Ivan_2013_MTS/", } @inproceedings{waldner-2013-facetCloudsGI, title = "FacetClouds: Exploring Tag Clouds for Multi-Dimensional Data", author = "Manuela Waldner and Johann Schrammel and Michael Klein and Katrin Kristjansdottir and Dominik Unger and Manfred Tscheligi", year = "2013", abstract = "Tag clouds are simple yet very widespread representations of how often certain words appear in a collection. In conventional tag clouds, only a single visual text variable is actively controlled: the tags’ font size. Previous work has demonstrated that font size is indeed the most influential visual text variable. However, there are other variables, such as text color, font style and tag orientation, that could be manipulated to encode additional data dimensions. FacetClouds manipulate intrinsic visual text variables to encode multiple data dimensions within a single tag cloud. We conducted a series of experiments to detect the most appropriate visual text variables for encoding nominal and ordinal values in a cloud with tags of varying font size. Results show that color is the most expressive variable for both data types, and that a combination of tag rotation and background color range leads to the best overall performance when showing multiple data dimensions in a single tag cloud. ", month = may, isbn = "978-1-4822-1680-6 ", publisher = "ACM Publishing House", organization = "ACM Siggraph", location = "Regina, Saskatchewan, Canada", address = "Regina, Saskatchewan, Canada", booktitle = "Proceedings of the 2013 Graphics Interface Conference", pages = "17--24", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/waldner-2013-facetCloudsGI/", } @mastersthesis{ernst-2013-fif, title = "Comparison of Image Blurring Techniques on Modern Graphics Processing Hardware", author = "Markus Ernst", year = "2013", abstract = "The increasingly computational power and programmability of modern graphics hardware provides developers of real-time rendering applications with the resources needed to realize more and more complex graphical effects. Some of these effects, like Depth-of-Field, require an efficient image blurring technique to achieve real-time frame rates of 30 frames per second or above. This work presents a comparison of various blurring techniques in terms of their performance on modern graphics hardware. Whereas most of the chosen methods are exclusively used to blur an image, some of them are capable of applying an arbitrary filter. Furthermore, the quality of the different methods has been determined using an automatic process which utilizes a calibrated visual metric. Another aspect when using modern graphics hardware is the increasing scope of operations, especially in the domain of image processing, that can be carried out by using general-purpose computing on graphics processing units (GPGPU). In the recent years, utilizing GPGPU has become increasingly popular inside real-time rendering applications for special tasks like physics simulations. 
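As a purely editorial example of the kind of blurring technique such a comparison covers (not one of the thesis's GLSL or CUDA implementations), a Gaussian blur is usually made real-time-friendly by exploiting separability, running two 1D passes instead of one 2D convolution:

    import numpy as np

    def gaussian_kernel_1d(sigma, radius):
        x = np.arange(-radius, radius + 1, dtype=float)
        k = np.exp(-(x * x) / (2.0 * sigma * sigma))
        return k / k.sum()

    def separable_gaussian_blur(image, sigma=2.0):
        # Two 1D convolutions (rows, then columns) give the same result as a
        # full 2D Gaussian convolution at a fraction of the cost.
        k = gaussian_kernel_1d(sigma, int(3 * sigma))
        rows = np.apply_along_axis(lambda r: np.convolve(r, k, mode="same"), 1, image)
        return np.apply_along_axis(lambda c: np.convolve(c, k, mode="same"), 0, rows)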
Therefore, all chosen algorithms have been implemented using shaders (GLSL) and GPGPU (CUDA), to answer the question whether or not the usage of a general purpose computing language is applicable for image blurring in real-time rendering and how it compares to using a shading language.", month = apr, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "glsl, cuda, image filters, gpu", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/ernst-2013-fif/", } @mastersthesis{Voglsam_2013_RRT, title = "Real-time Ray Tracing on the GPU - Ray Tracing using CUDA and kD-Trees", author = "G\"{u}nther Voglsam", year = "2013", abstract = "In computer graphics, ray tracing is a well-known image generation algorithm which exists since the late 1970s. Ray tracing is typically known as an offline algorithm, which means that the image generation process takes several seconds to minutes or even hours or days. In this thesis I present a ray tracer which runs in real-time. Real-time in terms of computer graphics means that 60 or more images per second (frames per second, FPS) are created. To achieve such frame rates, the ray tracer runs completely on the graphics card (GPU). This is possible by making use of Nvidia’s CUDA -API. With CUDA, it is possible to program the processor of a graphics card similar to a processor of a CPU. This way, the computational power of a graphics card can be fully utilized. A crucial part of any ray tracer is the acceleration data structure (ADS) used. The ADS is needed to efficiently search in geometric space. In this thesis, two variants of so called kD-Trees have been implemented. A kD-Tree is a binary tree, which divides at each node a given geometric space into two halves using an axis aligned splitting plane. Furthermore, a CUDA library for the rendering engine Aardvark, which is the in-house rendering engine at the VRVis Research Center, was developed to access CUDA functionality from within Aardvark in an easy and convenient way. The ray tracer is part of a current software project called “HILITE” at the VRVis Research Center.", month = apr, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Voglsam_2013_RRT/", } @incollection{sundstedt-2013-vag, title = "Visual Attention and Gaze Behaviour in Games: An Object-Based Approach", author = "Veronica Sundstedt and Matthias Bernhard and Efstathios Stavrakis and Erik Reinhard and Michael Wimmer", year = "2013", abstract = "This chapter presents state-of-the-art methods that tap the potential of psychophysics for the purpose of understanding game players' behavior. Studying gaze behavior in gaming environments has recently gained momentum as it affords a better understanding of gamers' visual attention. However, while knowing where users are attending in a computer game would be useful at a basic level, it does not provide insight into what users are interested in, or why. An answer to these questions can be tremendously useful to game designers, enabling them to improve gameplay, selectively increase visual fidelity, and optimize the distribution of computing resources. 
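Relating to the ray-tracing thesis above, a minimal sketch of the kD-tree structure it describes (names and layout chosen here for illustration, not taken from the thesis): each inner node splits its cell with an axis-aligned plane, and point location simply descends toward the half containing the query.

    from dataclasses import dataclass
    from typing import List, Optional

    @dataclass
    class KDNode:
        axis: int = 0                      # 0 = x, 1 = y, 2 = z
        position: float = 0.0              # location of the splitting plane
        left: Optional["KDNode"] = None    # cell with coordinate <  position
        right: Optional["KDNode"] = None   # cell with coordinate >= position
        triangles: Optional[List[int]] = None   # set only in leaf nodes

    def locate(node, point):
        # Descend to the leaf whose cell contains the query point.
        while node.triangles is None:
            node = node.left if point[node.axis] < node.position else node.right
        return node

Ray traversal visits the child cells in the order the ray pierces them, which is what makes the structure effective for nearest-intersection queries.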
Furthermore, this could be useful in verifying game mechanics, improving game AI and smart positioning of advertisements within games, all being applications widely desirable across the games industry. Techniques are outlined to collect gaze data, and map fixation points back to semantic objects in a gaming environment, enabling a deeper understanding of how players interact with games. ", month = apr, booktitle = "Game Analytics: Maximizing the Value of Player Data ", editor = "M. Seif El-Nasr, A. Drachen, A. Canossa, K. Isbister,", isbn = "9781447147688", publisher = "Springer", keywords = "Eye Tracking, Visual Attention, Computer Games", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/sundstedt-2013-vag/", } @inproceedings{LUKSCH-2013-FLM, title = "Fast Light-Map Computation with Virtual Polygon Lights", author = "Christian Luksch and Robert F. Tobler and Ralf Habel and Michael Schw\"{a}rzler and Michael Wimmer", year = "2013", abstract = "We propose a new method for the fast computation of light maps using a many-light global-illumination solution. A complete scene can be light mapped on the order of seconds to minutes, allowing fast and consistent previews for editing or even generation at loading time. In our method, virtual point lights are clustered into a set of virtual polygon lights, which represent a compact description of the illumination in the scene. The actual light-map generation is performed directly on the GPU. Our approach degrades gracefully, avoiding objectionable artifacts even for very short computation times. ", month = mar, isbn = "978-1-4503-1956-0", publisher = "ACM", location = "Orlando, Florida", booktitle = "Proceedings of ACM Symposium on Interactive 3D Graphics and Games 2013", pages = "87--94", keywords = "instant radiosity, global illumination, light-maps", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/LUKSCH-2013-FLM/", } @incollection{schedl-2013-gP4, title = "Simulating partial occlusion in post-processing depth-of-field methods", author = "David Schedl and Michael Wimmer", year = "2013", abstract = "This chapter describes a method for simulating Depth of Field (DoF). In particular, we investigate the so-called partial occlusion effect: objects near the camera blurred due to DoF are actually semitransparent and therefore result in partially visible background objects. This effect is strongly apparent in miniature- and macro photography and in film making. Games and interactive applications are nowadays becoming more cinematic, including strong DoF effects, and therefore it is important to be able to convincingly approximate the partial-occlusion effect. We show how to do so in this chapter; with the proposed optimizations even in real time.", month = mar, booktitle = "GPU Pro 4: Advanced Rendering Techniques", editor = "Wolfgang Engel", isbn = "9781466567436", note = "to appear", publisher = "A K Peters", keywords = "depth of field, realtime, layers, blurring", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/schedl-2013-gP4/", } @article{Kehrer-2013-STAR, title = "Visualization and Visual Analysis of Multi-faceted Scientific Data: A Survey", author = "Johannes Kehrer and Helwig Hauser", year = "2013", abstract = "Visualization and visual analysis play important roles in exploring, analyzing and presenting scientific data. 
In many disciplines, data and model scenarios are becoming multi-faceted: data are often spatio-temporal and multi-variate; they stem from different data sources (multi-modal data), from multiple simulation runs (multi-run/ensemble data), or from multi-physics simulations of interacting phenomena (multi-model data resulting from coupled simulation models). Also, data can be of different dimensionality or structured on various types of grids that need to be related or fused in the visualization. This heterogeneity of data characteristics presents new opportunities as well as technical challenges for visualization research. Visualization and interaction techniques are thus often combined with computational analysis. In this survey, we study existing methods for visualization and interactive visual analysis of multi-faceted scientific data. Based on a thorough literature review, a categorization of approaches is proposed. We cover a wide range of fields and discuss to which degree the different challenges are matched with existing solutions for visualization and visual analysis. This leads to conclusions with respect to promising research directions, for instance, to pursue new solutions for multi-run and multi-model data as well as techniques that support a multitude of facets.", month = mar, issn = "1077-2626", journal = "IEEE Transactions on Visualization and Computer Graphics", note = "Spotlight paper of the March issue of TVCG", number = "3", volume = "19", pages = "495--513", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Kehrer-2013-STAR/", } @mastersthesis{pangerl_2013_GPD, title = "Games with a Purpose: Design and implementation of serious games to determine the global land cover", author = "Dietmar Pangerl", year = "2013", abstract = "Remote sensing of land cover has, not least because of the rapidly growing world population, gained scientific and economic importance. Several international projects have aimed to determine and map out the global land cover. Unfortunately, the results of remote sensing projects such as GLC-2000, MODIS or GlobCover are often ambiguous or show significant differences. The project Geo-Wiki.org examines the results of these remote sensing projects and tries to validate them with the help of volunteers. This task requires a large number of participants. Among other measures, competitions motivate volunteers to contribute to the crowdsourcing project. Motivating volunteers to participate in the remote sensing of land cover is one of the key aspects within the Geo-Wiki.org project. One approach to reach new users for the project is to develop computer games that implement the classification of land cover areas as part of the game. The project Landspotting is a platform that aims to develop Serious Games for land cover classification. This thesis focuses on the development of a computer game for mobile devices that implements the classification of land cover as an integral part. Previous projects such as the game Landspotting for the platform Facebook are presented in the thesis. These games inspired the design process. Mobile devices with built-in, touch-sensitive displays offer the possibility to classify the land cover by painting appropriate sections of satellite images on the screen. The results of these categorizations are compared with data provided by the Geo-Wiki.org project. The result of this comparison is the basis for the achieved progress in the game. 
The computer game Landspotting, developed as part of this thesis, was published on the 4th of January 2013. The game was downloaded and installed by a large number of players. The huge amount of land cover data obtained by players illustrates the great potential in the combination of crowdsourcing systems and serious games. The comparison of the data obtained permits conclusions about the quality of the results of both the developed computer game and the Geo-Wiki.org project.", month = mar, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Landspotting", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/pangerl_2013_GPD/", } @article{Viola_Ivan_2013_CAI, title = "Computer-aided image geometry analysis and subset selection for optimizing texture quality in photorealistic models", author = "Aleksandra Anna Sima and Xavier Bonaventura and Miquel Feixas and Mateu Sbert and John Anthony Howell and Ivan Viola and Simon John Buckley", year = "2013", abstract = "Photorealistic 3D models are used for visualization, interpretation and spatial measurement in many disciplines, such as cultural heritage, archaeology and geoscience. Using modern image- and laser-based 3D modelling techniques, it is normal to acquire more data than is finally used for 3D model texturing, as images may be acquired from multiple positions, with large overlap, or with different cameras and lenses. Such redundant image sets require sorting to restrict the number of images, increasing the processing efficiency and realism of models. However, selection of image subsets optimized for texturing purposes is an example of complex spatial analysis. Manual selection may be challenging and time-consuming, especially for models of rugose topography, where the user must account for occlusions and ensure coverage of all relevant model triangles. To address this, this paper presents a framework for computer-aided image geometry analysis and subset selection for optimizing texture quality in photorealistic models. The framework was created to offer algorithms for candidate image subset selection, whilst supporting refinement of subsets in an intuitive and visual manner. Automatic image sorting was implemented using algorithms originating in computer science and information theory, and variants of these were compared using multiple 3D models and covering image sets, collected for geological applications. The image subsets provided by the automatic procedures were compared to manually selected sets and their suitability for 3D model texturing was assessed. Results indicate that the automatic sorting algorithms are a promising alternative to manual methods. An algorithm based on a greedy solution to the weighted set-cover problem provided image sets closest to the quality and size of the manually selected sets. 
The improved automation and more reliable quality indicators make the photorealistic model creation workflow more accessible for application experts, increasing the user’s confidence in the final textured model completeness.", month = mar, journal = "Computers & Geosciences", volume = "52", pages = "281--291", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Viola_Ivan_2013_CAI/", } @inproceedings{SCHWAERZLER-2013-FPCSS, title = "Fast Percentage Closer Soft Shadows using Temporal Coherence", author = "Michael Schw\"{a}rzler and Christian Luksch and Daniel Scherzer and Michael Wimmer", year = "2013", abstract = "We propose a novel way to efficiently calculate soft shadows in real-time applications by overcoming the high computational effort involved with the complex corresponding visibility estimation each frame: We exploit the temporal coherence prevalent in typical scene movement, making the estimation of a new shadow value only necessary whenever regions are newly disoccluded due to camera adjustment, or the shadow situation changes due to object movement. By extending the typical shadow mapping algorithm by an additional light-weight buffer for the tracking of dynamic scene objects, we can robustly and efficiently detect all screen space fragments that need to be updated, including not only the moving objects themselves, but also the soft shadows they cast. By applying this strategy to the popular Percentage Closer Soft Shadow algorithm (PCSS), we double rendering performance in scenes with both static and dynamic objects - as prevalent in various 3D game levels - while maintaining the visual quality of the original approach.", month = mar, isbn = "978-1-4503-1956-0", publisher = "ACM", location = "Orlando, Florida", address = "New York, NY, USA", booktitle = "Proceedings of ACM Symposium on Interactive 3D Graphics and Games 2013", pages = "79--86", keywords = "real-time, temporal coherence, soft shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/SCHWAERZLER-2013-FPCSS/", } @article{knecht_martin_2013_ReflRefrObjsMR, title = "Reflective and Refractive Objects for Mixed Reality", author = "Martin Knecht and Christoph Traxler and Christoph Winklhofer and Michael Wimmer", year = "2013", abstract = "In this paper, we present a novel rendering method which integrates reflective or refractive objects into a differential instant radiosity (DIR) framework usable for mixed-reality (MR) applications. This kind of objects are very special from the light interaction point of view, as they reflect and refract incident rays. Therefore they may cause high-frequency lighting effects known as caustics. Using instant-radiosity (IR) methods to approximate these high-frequency lighting effects would require a large amount of virtual point lights (VPLs) and is therefore not desirable due to real-time constraints. Instead, our approach combines differential instant radiosity with three other methods. One method handles more accurate reflections compared to simple cubemaps by using impostors. Another method is able to calculate two refractions in real-time, and the third method uses small quads to create caustic effects. Our proposed method replaces parts in light paths that belong to reflective or refractive objects using these three methods and thus tightly integrates into DIR. In contrast to previous methods which introduce reflective or refractive objects into MR scenarios, our method produces caustics that also emit additional indirect light. 
The method runs at real-time frame rates, and the results show that reflective and refractive objects with caustics improve the overall impression for MR scenarios.", month = mar, journal = "IEEE Transactions on Visualization and Computer Graphics (Proceedings of IEEE VR 2013)", volume = "19", number = "4", issn = "1077-2626", pages = "576--582", keywords = "Mixed Reality, Caustics, Reflections, Refractions", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/knecht_martin_2013_ReflRefrObjsMR/", } @article{fink-2013-cag, title = "Teaching a Modern Graphics Pipeline Using a Shader-based Software Renderer", author = "Heinrich Fink and Thomas Weber and Michael Wimmer", year = "2013", abstract = "This paper presents the syllabus for an introductory computer graphics course that emphasizes the use of programmable shaders while teaching raster-level algorithms at the same time. We describe a Java-based framework that is used for programming assignments in this course. This framework implements a shader-enabled software renderer and an interactive 3D editor. Teaching shader programming in concert with the low-level graphics pipeline makes it easier for our students to learn modern OpenGL with shaders in our follow-up intermediate course. We also show how to create attractive course material by using COLLADA, an open standard for 3D content exchange, and our approach to organizing the practical course.", month = feb, issn = "0097-8493", journal = "Computers & Graphics", number = "1--2", volume = "37", pages = "12--20", keywords = "teaching, programmable shading, CG education, course organization, COLLADA", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/fink-2013-cag/", } @bachelorsthesis{jahrmann_klemens_KFR, title = "Kinect Fusion - Reconstruction", author = "Klemens Jahrmann", year = "2013", abstract = "The procedure of collecting 3D data via an input device and processing it to a virtual 3D model is called 3D reconstruction. It is a widely used technique in visual computing, since modern applications like games or visualizations tend to be more and more photo-realistic, leading to high costs in content creation. By using 3D reconstruction, high-quality geometry can be generated from real objects. However, obtaining good reconstructions requires special hardware, which is very expensive. Since Microsoft released the Kinect camera, which has a depth sensor in addition to the RGB sensor, a comparatively cheap device is available that is able to extract 3D data of its surroundings. KinectFusion, also developed by Microsoft, is a technique that uses the Kinect camera for 3D reconstruction in real-time. In order to achieve real-time speed, the algorithm is executed almost exclusively on the graphics card. Each frame, the algorithm first gathers the information from the Kinect and processes it. After that, it measures the camera’s position in space and fills a 3D volume with surface data. Finally, a raycasting algorithm is used to extract isosurfaces out of the volume. During the work on this thesis we implemented the KinectFusion algorithm inside the RESHADE framework. 
The results and the implementation itself are presented as part of the thesis.", month = feb, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Reconstruction, Kinect Fusion", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/jahrmann_klemens_KFR/", } @phdthesis{Reisner_Irene_2013_R3D, title = "Reconstruction of 3D Models from Images and Point Clouds with Shape Primitives", author = "Irene Reisner-Kollmann", year = "2013", abstract = "3D models are widely used in different applications, including computer games, planning software, applications for training and simulation, and virtual city maps. For many of these applications it is necessary or at least advantageous, if the virtual 3D models are based on real world scenes and objects. Manual modeling is reserved for experts as it requires extensive skills. For this reason, it is necessary to provide automatic or semi-automatic, easy-to-use techniques for reconstructing 3D objects. In this thesis we present methods for reconstructing 3D models of man-made scenes. These scenes can often be approximated with a set of geometric primitives, like planes or cylinders. Using geometric primitives leads to light-weight, low-poly 3D models, which are beneficial for efficient storage and post-processing. The applicability of reconstruction algorithms highly depends on the existing input data, the characteristics of the captured objects, and the desired properties of the reconstructed 3D model. For this reason, we present three algorithms that use different input data. It is possible to reconstruct 3D models from just a few photographs or to use a dense point cloud as input. Furthermore, we present techniques to combine information from both, images and point clouds. The image-based reconstruction method is especially designed for environments with homogenous and reflective surfaces where it is difficult to acquire reliable point sets. Therefore we use an interactive application which requires user input. Shape primitives are fit to user-defined segmentations in two or more images. Our point-based algorithms, on the other hand, provide fully automatic reconstructions. Nevertheless, the automatic computations can be enhanced by manual user inputs for generating improved results. The first point-based algorithm is specialized on reconstructing 3D models of buildings and uses unstructured point clouds as input. The point cloud is segmented into planar regions and converted into 3D geometry. The second point-based algorithm additionally supports the reconstruction of interior scenes. While unstructured point clouds are supported as well, this algorithm specifically exploits the redundancy and visibility information provided by a set of range images. The data is automatically segmented into geometric primitives. 
Then the shape boundaries are extracted either automatically or interactively.", month = feb, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Reisner_Irene_2013_R3D/", } @bachelorsthesis{Prast_Stefanie_2013-CLG, title = "Caustics, Light Shafts, God Rays", author = "Stefanie Prast and Anna Fr\"{u}hst\"{u}ck", year = "2013", abstract = "Lighting effects, such as caustics and light shafts, are an important component of the rendering of global illumination images. The correct depiction of the interaction of light with different surfaces is crucial to the realism of any rendered scene. Dealing with the complexity of global illumination has long been among the biggest challenges in computer graphics, a problem that is even more prominent when it comes to rendering interactive environments. Particularly the simulation of caustics is a difficult task since they can only be rendered satisfactorily through techniques which trace the light from the illuminants. Several different techniques to speed up the process of rendering realistic global illumination effects have been developed. Among those are path tracing, ray tracing and photon mapping. Most state-of-the-art rendering techniques rely heavily on the computation power of the GPU. We wish to present a survey of current rendering techniques for approximating physically exact representations of caustics, light shafts and god rays.", month = feb, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "light effects, refraction, god rays, caustics, real-time, rendering, reflection, GPU", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Prast_Stefanie_2013-CLG/", } @bachelorsthesis{Spechtenhauser_Florian_2013_SIU, title = "Shape Interpolation Using Diffusion Isosurfaces", author = "Florian Spechtenhauser", year = "2013", abstract = "I present a diffusion-based shape interpolation method which is applicable to 2D and 3D surfaces. As input, 2D shapes are represented as diffusion curves, while 3D shapes are simply 3D meshes. The algorithm generates an exact Voronoi diagram of two surfaces along with a distance map; both are stored as textures for further manipulation and lookup. The Voronoi diagram is used as starting point for the iterative color diffusion. After the diffusion step, an isovalue can be applied to the resulting texture. By varying the isovalue, different intermediate surfaces between the two input surfaces arise. In 2D, Diffusion Curves [3] are used as input, whereas in 3D, textured surface meshes are used. This shape interpolation method is applicable to every kind of shape that can be represented by diffusion curves or 3D surface meshes.", month = feb, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Spechtenhauser_Florian_2013_SIU/", } @phdthesis{Konyha_2013_IVA, title = "Interactive Visual Analysis in Automotive Engineering Design", author = "Zoltan Konyha", year = "2013", abstract = "Computational simulation has become instrumental in the design process in automotive engineering. Virtually all components and subsystems of automobiles can be simulated. 
The simulation can be repeated many times with varied parameter settings, thereby simulating many possible design choices. Each simulation run can produce a complex, multivariate, and usually time-dependent result data set. The engineers’ goal is to generate useful knowledge from those data. They need to understand the system’s behavior, find correlations in the results, conclude how results depend on the parameters, find optimal parameter combinations, and exclude the ones that lead to undesired results. Computational analysis methods are widely used and necessary to analyze simulation data sets, but they are not always sufficient. They typically require that problems and interesting data features can be precisely defined from the beginning. The results of automated analysis of complex problems may be difficult to interpret. Exploring trends, patterns, relations, and dependencies in time-dependent data through statistical aggregates is not always intuitive. In this thesis, we propose techniques and methods for the interactive visual analysis (IVA) of simulation data sets. Compared to computational methods, IVA offers new and different analysis opportunities. Visual analysis utilizes human cognition and creativity, and can also incorporate the experts’ domain knowledge. Therefore, their insight into the data can be amplified, and also less precisely defined problems can be solved. We introduce a data model that effectively represents the multi-run, time-dependent simulation results as families of function graphs. This concept is central to the thesis, and many of the innovations in this thesis are closely related to it. We present visualization techniques for families of function graphs. Those visualizations, as well as well-known information visualization plots, are integrated into a coordinated multiple views framework. All views provide focus+context visualization. Compositions of brushes spanning several views can be defined iteratively to select interesting features and promote information drill-down. Valuable insight into the spatial aspect of the data can be gained from (generally domain-specific) spatio-temporal visualizations. In this thesis, we propose interactive, glyph-based 3D visualization techniques for the analysis of rigid and elastic multibody system simulations. We integrate the on-demand computation of derived data attributes of families of function graphs into the analysis workflow. This facilitates the selection of deeply hidden data features that cannot be specified by combinations of simple brushes on the original data attributes. The combination of these building blocks supports interactive knowledge discovery. The analyst can build a mental model of the system; also explore unexpected features and relations; and generate, verify or reject hypotheses with visual tools; thereby gaining more insight into the data. Complex tasks, such as parameter sensitivity analysis and optimization, can be solved. Although the primary motivation for our work was the analysis of simulation data sets in automotive engineering, we learned that this data model and the analysis procedures we identified are also applicable to several other problem domains. We discuss common tasks in the analysis of data containing families of function graphs. Two case studies demonstrate that the proposed approach is indeed applicable to the analysis of simulation data sets in automotive engineering. 
Some of the contributions of this thesis have been integrated into a commercially distributed software suite for engineers. This suggests that their impact can extend beyond the visualization research community.", month = jan, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Konyha_2013_IVA/", } @mastersthesis{Sorger_2013_nMI, title = "neuroMap - Interactive Graph-Visualization of the Fruit Fly’s Neural Circuit", author = "Johannes Sorger", year = "2013", abstract = "Neuroscientists study the function of neural circuits in the brain of the common fruit fly Drosophila Melanogaster to discover how complex behavior is generated. Through a combination of molecular-genetic techniques and confocal microscopy the scientists are able to highlight single neurons and produce three-dimensional images of the fly’s brain. Neurons are segmented, annotated, and compiled into a digital atlas. Brain atlases offer tools for exploring and analyzing their underlying data. To establish models of neural information processing, knowledge about possible connections between individual neurons is necessary. Connections can occur when arborizations (the terminal branchings of nerve fibers) of two neurons are overlapping. However, analyzing overlapping objects using traditional volumetric visualization is difficult since the examined objects occlude each other. A more abstract form of representation is therefore required. The work in this thesis was motivated by a manually constructed two-dimensional circuit diagram of potential neuronal connections that represents a novel way of visualizing neural connectivity data. Through abstracting the complex volumetric data, the diagram offers an intuitive and clear overview of potential connectivity. In collaboration with a group of neuroscientists neuroMap was designed and implemented in an attempt to deliver the visual features and encoded information of this circuit diagram in an automatically generated interactive graph, with the goal of facilitating hypothesis formation and exploration of neural connectivity. In this thesis the visual and interaction design decisions that went into neuroMap are presented, as well as the result of evaluative discussions that shows that the integration of this novel type of visualization into the existing datamining infrastructure of our clients is indeed beneficial to their research.", month = jan, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Sorger_2013_nMI/", } @article{arikan-2013-osn, title = "O-Snap: Optimization-Based Snapping for Modeling Architecture", author = "Murat Arikan and Michael Schw\"{a}rzler and Simon Fl\"{o}ry and Michael Wimmer and Stefan Maierhofer", year = "2013", abstract = "In this paper, we introduce a novel reconstruction and modeling pipeline to create polygonal models from unstructured point clouds. We propose an automatic polygonal reconstruction that can then be interactively refined by the user. An initial model is automatically created by extracting a set of RANSAC-based locally fitted planar primitives along with their boundary polygons, and then searching for local adjacency relations among parts of the polygons. 
The extracted set of adjacency relations is enforced to snap polygon elements together, while simultaneously fitting to the input point cloud and ensuring the planarity of the polygons. This optimization-based snapping algorithm may also be interleaved with user interaction. This allows the user to sketch modifications with coarse and loose 2D strokes, as the exact alignment of the polygons is automatically performed by the snapping. The generated models are coarse, offer simple editing possibilities by design and are suitable for interactive 3D applications like games, virtual environments etc. The main innovation in our approach lies in the tight coupling between interactive input and automatic optimization, as well as in an algorithm that robustly discovers the set of adjacency relations.", month = jan, journal = "ACM Transactions on Graphics", volume = "32", number = "1", issn = "0730-0301", doi = "10.1145/2421636.2421642", pages = "6:1--6:15", keywords = "interactive modeling, surface reconstruction, geometric optimization", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/arikan-2013-osn/", } @phdthesis{Schulze_Florian_2013_CMI, title = "Computational Methods enabling Interactivity in Analysis and Exploration of Volumetric Images", author = "Florian Schulze", year = "2013", abstract = "Volumetric imaging is widely used in medicine and life sciences allowing to gain insight into otherwise opaque objects. Volumetric images do not unveil their content in a direct way due to their spatial structure. Therefore a variety of computational methods are used for visualization and processing which allow to explore and analyze the data. Analysis and exploration of the data is usually performed in an interactive way either manually or with support of semi-automatic algorithms. It is crucial for an efficient completion of the task that the system performs interactively and responsively. Thus, software supporting the user in an effective way relies on three basic requirements. First, the system must deliver feedback in a short period of time. Second, results of any computation must be presented or visualized in a way that the user can efficiently recognize the important information. Third, the user must be able to efficiently control, initialize or adjust the algorithm through a suitable user interface. In this thesis four approaches are presented which aim to solve different aspects of the problem of enabling interactivity in analysis and exploration of volumetric image data. The first presented project studies the design of an application which has strict limitations concerning the user interface due to the application environment which requires almost a hands free interaction. The problem is approached by the development of efficient and robust visualization which makes adjustments needless, and by the development of sophisticated interaction patterns which reduce the needed interface to the minimum. The second project focuses on methods which optimize a computationally intensive feature detection task that can be used in an interactive scenario which requires the algorithm to produce results in just a few seconds. To achieve this goal the probabilistic boosting tree classification algorithm is extended and optimized for runtime and memory efficiency. The third and the fourth project focus on the interactive exploration of large image and object collections. Two approaches are presented for this problem area. 
For the retrieval of neuronal objects by similarity, measures for different neuronal substructures have been developed which are able to perform efficiently on large amounts of data. For the retrieval of images and objects by local means, such as neighborhood, overlap, local image expression and local image similarity, a sophisticated, updatable, high-performance index structure has been developed. The index allows storing local properties of volumetric data in a space-efficient way and retrieving this data with low latency. The presented projects demonstrate that the challenge of achieving interactivity often lies in the development of methods which allow balancing processing speed with result quality. Furthermore, it is shown that time performance is not the only property which needs to be respected; result presentation as well as interaction patterns deserve similar attention and contribute greatly to an interactive user experience.", month = jan, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Schulze_Florian_2013_CMI/", } @mastersthesis{bbeer-2013, title = "Visualisierung von Eishockeystatistiken auf mobilen Endger\"{a}ten", author = "Benjamin Beer", year = "2013", abstract = "The last few years have brought great progress in the field of information visualization. There are many new and interesting ways to process and visualize the huge amount of data we collect in our everyday lives, to make it easier to understand the meaning of the data. Nevertheless, it is surprising that it is very hard to find literature about visualizations of sports statistics, even though this data poses interesting challenges for finding and evaluating new visualization methods. In this thesis we evaluate and examine sports statistics, particularly ice hockey statistics, and look for existing and new ways to visualize them. In particular, we focus on methods which are suitable for mobile devices like smartphones and tablets, because those have very different requirements than normal desktop computers. We also build a prototype which visualizes different ice hockey statistics: one view to get the most important information of a league on one screen (league standings, past and future games), and a visualization where the user can compare the statistics of two teams. We evaluated our method by comparing it with a regular table of statistics. Besides showing the strengths and weaknesses of the new method, the evaluation also suggested some interesting approaches for future work in the area of sports visualization on mobile devices.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/bbeer-2013/", } @xmascard{x-mas-2013, title = "X-Mas Card 2013", author = "Michael Birsak and Przemyslaw Musialski", year = "2013", abstract = "With our Season’s Greetings we are sending you an automatically generated brochure that provides routing information for several of Vienna’s Christmas markets. In the middle you can see an overview map that provides approximate locations as well as routing information. Around the map you can see so-called ‘detail lenses’ that provide more exact maps and routing as well as names and photographs. 
All elements are arranged using a binary integer program (BIP), whose goal is the positioning of the detail lenses as close as possible to their corresponding markers in the overview map.", keywords = "x-mas card", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/x-mas-2013/", } @mastersthesis{carbesser-2013, title = "Large-Scale Noise Simulation and Visualization of Moving Point Sources", author = "Clemens Arbesser", year = "2013", abstract = "Noise pollution is an ever-increasing problem not just in urban environments but also in more rural areas such as small villages, along country roads or even in very sparsely populated regions. The demands of the industry and local governments often clash with the interests of people in the neighborhood, creating areas of conflict that often end up in court. Though in many countries noise assessments are mandatory in order to obtain building permission, these documents are usually not suited or sometimes conceivably not even intended to convey the impact of projects on their environment to the general public. The purpose of this master’s thesis is to propose ways to simulate and visualize noise pollution in large-scale, non-urban environments in order to help communicate the impact of new sound emitters on affected neighbors. Knowledge of noise propagation, the influence of the terrain and other obstacles as well as how different emitters add up can provide valuable insights and help in the decision-making process. This knowledge may be particularly helpful when trying to decide on suitable locations for noise screens and/or when trying to find good places to offset some of the local noise emitters. The tool developed uses NVIDIA’s CUDA architecture and the European norm ISO 9613-2: Attenuation of sound during propagation outdoors to create real-time visualizations in both 2D and 3D. Results are compared against ground truth data obtained by taking noise measurements in the field.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/carbesser-2013/", } @talk{ilcik-2013-pmsg, title = "Procedural Modeling with Shape Grammars", author = "Martin Il\v{c}\'{i}k", year = "2013", abstract = "Formal languages are used in all areas of computer science including computer graphics. I will focus on procedural modeling, which provides means for automated design and generation of 3D models using grammars. Repeating, symmetric and similar parts of models can be easily encoded. However, abstract description of irregular geometry is connected with several difficult and interesting problems. Starting with well-known L-Systems, I will guide you through the world of shape generating grammars up to the state-of-the-art layered concepts. 
At the end, I will show examples of how to model plants, houses and furniture.", event = "Seminar of the Institute of Computer Science", location = "Faculty of Mathematics, Physics and Informatics, Comenius University, Bratislava", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/ilcik-2013-pmsg/", } @talk{kehrer-2013-IVA, title = "Visual Analysis of Multi-faceted Scientific Data: Challenges and Trends", author = "Johannes Kehrer", year = "2013", event = "Karlsruhe Institute of Technology", location = "Karlsruhe, Germany", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/kehrer-2013-IVA/", } @article{mindek-2013-pel, title = "Visual Parameter Exploration in GPU Shader Space", author = "Peter Mindek and Stefan Bruckner and Peter Rautek and Eduard Gr\"{o}ller", year = "2013", abstract = "The wide availability of high-performance GPUs has made the use of shader programs in visualization ubiquitous. Understanding shaders is a challenging task. Frequently it is difficult to mentally reconstruct the nature and types of transformations applied to the underlying data during the visualization process. We propose a method for the visual analysis of GPU shaders, which allows the flexible exploration and investigation of algorithms, parameters, and their effects. We introduce a method for extracting feature vectors composed of several attributes of the shader, as well as a direct manipulation interface for assigning semantics to them. The user interactively classifies pixels of images which are rendered with the investigated shader. The two resulting classes, a positive class and a negative one, are employed to steer the visualization. Based on this information, we can extract a wide variety of additional attributes and visualize their relation to this classification. Our system allows an interactive exploration of shader space and we demonstrate its utility for several different applications.", journal = "Journal of WSCG", volume = "21", number = "3", issn = "1213-6972", pages = "225--234", keywords = "shader augmentation, parameter space exploration", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/mindek-2013-pel/", } @studentproject{steiner-2013-BS, title = "Voxel Cone Tracing", author = "Bernhard Steiner", year = "2013", abstract = "Voxel cone tracing can simulate global illumination effects in real time. This project tries to evaluate how plausible the simulation is and how well it scales in terms of performance and quality.", keywords = "Voxel, Voxel Cone Tracing, Cone Tracing, Indirect Illumination, Global Illumination", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/steiner-2013-BS/", } @bachelorsthesis{szabo_2013_ssrr, title = "Rasterized Curved Reflections in Screen Space", author = "Attila Szabo", year = "2013", abstract = "The rendering of reflections on mirror-like objects is an important operation performed in image synthesis. Being able to calculate the reflections on reflective surfaces in a rendered scene helps visualize many materials which have such properties and aids the viewer in recognizing objects and perceiving distance relations between them. Considering the increasing use of computer systems in day-to-day life, there is much interest in implementing methods that are able to render these reflections at interactive framerates for use in interactive systems, such as computer games and virtual reality. In this paper, one state-of-the-art method and two possible extensions are examined. 
The method is designed for rendering accurate reflections of geometry on the surface of a curved reflector in real-time, utilizing the capabilities of the rendering pipelines implemented on contemporary graphics hardware. It is based around finding the reflection point for each vertex of a geometry object, and then letting the graphics hardware rasterize the reflected geometry using the found points. Two important problems with this approach are that the search for the reflection point can take a long time, and that the linear interpolation used in the rasterizing step leads to artifacts on the reflector’s curved surface. The first examined modification is aimed at reducing the time needed for the search of a reflection point by using a hierarchical data structure, and an accordingly different searching technique, while still storing the data as efficiently as before. The second modification attempts to reduce the linear interpolation error in the final image by tessellating the reflected geometry. This is done adaptively, based on a metric for the reduced error. Finally, some results are presented and discussed, and some ideas for possible future work in this field are given.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Curved Reflections, Ray-Space Hierarchy, Adaptive Tessellation", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/szabo_2013_ssrr/", } @talk{Groeller_Edi_VKA, title = "Visualisation using Knowledge Assisted Sparse Interaction", author = "Eduard Gr\"{o}ller", year = "2013", abstract = "Knowledge-Assisted Visualization: - LiveSync: Knowledge-Based Navigation - Contextual Picking - Knowledge-Assisted Sparse Interaction - Semantics Driven Illustrative Rendering - Smart Super Views", event = "VIGOR++ Workshop 2013 – Advances in VPH Technologies and the VIGOR++ Tools", location = "Academic Medical Center, Amsterdam, The Netherlands", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Groeller_Edi_VKA/", } @bachelorsthesis{koessler-2013-BA, title = "Feature-Adaptive Catmull-Clark Subdivision on the GPU", author = "Christian K\"{o}{\ss}ler", year = "2013", abstract = "Catmull-Clark subdivision is a powerful standard modeling technique and has already been used extensively in CGI for motion pictures or computer games. An artist creates a coarse polygon mesh that is computationally converted into a high-quality smooth surface. Due to the recursive nature of the subdivision algorithm and the large number of polygons that are generated during mesh refinement, it is not well suited for real-time environments. There exist several approaches to generate a Catmull-Clark subdivision surface which use current GPU technologies to overcome these issues. In this thesis, we use Compute Shaders for subdivision and a Cubic Bezier Patch which takes advantage of the tessellation pipeline to get an optimized algorithm without the loss of visual quality. The presented method also supports (semi-)sharp creases, which are an important feature for achieving a more realistic look. The practical part of this thesis is integrated into the Helix 3D Toolkit SharpDX Framework. 
", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "subdivision surfaces, geometric modeling, GPU tessellation, computer graphics", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/koessler-2013-BA/", } @inproceedings{EISEMANN-2013-ERT, title = "Efficient Real-Time Shadows", author = "Elmar Eisemann and Ulf Assarsson and Michael Schwarz and Michal Valient and Michael Wimmer", year = "2013", abstract = "This course provides an overview of efficient, real-time shadow algorithms. It presents the theoretical background but also discusses implementation details for facilitating efficient realizations (hard and soft shadows, volumetric shadows, reconstruction techniques). These elements are of relevance to both experts and practitioners. The course also reviews budget considerations and analyzes performance trade-offs, using examples from various AAA game titles and film previsualization tools. While physical accuracy can sometimes be replaced by plausible shadows, especially for games, film production requires more precision, such as scalable solutions that can deal with highly detailed geometry. The course builds upon earlier SIGGRAPH courses as well as the recent book Real-Time Shadows (A K Peters, 2011) by four of the instructors (due to its success, a second edition is planned for 2014). And with two instructors who have worked on AAA game and movie titles, the course presents interesting behind-the-scenes information that illuminates key topics.", booktitle = "ACM SIGGRAPH 2013 Courses", isbn = "978-1-4503-2339-0", location = "Anaheim, CA", publisher = "ACM", pages = "18:1--18:54", keywords = "real-time rendering, shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/EISEMANN-2013-ERT/", } @studentproject{charpenay-2013-PR, title = "Adaptive Garbor Noise Sampling", author = "Victor Charpenay", year = "2013", keywords = "Garbor Noise, Procedural Texture", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/charpenay-2013-PR/", } @mastersthesis{zapotocky_2013_ma, title = "Image-Based Modeling with Polyhedral Primitives", author = "Stephan Zapotocky", year = "2013", abstract = "The reconstruction of 3D models out of image data plays an important role in many research areas. Previous work has brought fully automatic or semi-automatic algorithms for extracting 3D data out of multiple images or videos. The output is usually a 3D point cloud, which is a big challenge when further manipulation of the scanned object, such as modeling and texturing, is needed. The goal of this master thesis is the research on methods for creating simple geometry out of such initial data. In the process of the work a modeling tool will be implemented, which enables users to integrate polyhedral primitives in a 3D point dataset. 
Being assisted by optimization techniques, the user shall be able to simplify such a 3D point cloud to a combination of basic geometric objects, which allow an easy manipulation in further tasks.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "interactive modeling, image-based modeling", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/zapotocky_2013_ma/", } @talk{Viola_Ivan_2013_DC, title = "Declarative Visualization", author = "Ivan Viola", year = "2013", abstract = "Visualization algorithms are nowadays formalized in an imperative manner, i.e. the algorithm is explicitly executed on input data and dictates a determined visualization outcome. The efficiency of such an algorithm is measured by means of the computational performance, data-scalability and user studies. In my talk I will speculate on a novel theoretical concept for the development of new visualization methodology that becomes ultimately declarative and algorithm-free, by moving the user study from a validation stage into the center of the iterative design stage. Initial visualization from input data is considered as the first design draft, which will undergo several revisions. This draft can be achieved by executing a traditional imperative algorithm or it can even be hand-crafted by a skilled illustrator. A consequent user study of initial visualization will trigger computational synthesis of a new, quantitatively more effective visualization technique. The visualization designs developed through several iterations of the study-redesign cycle will become declarative, aiming at optimally satisfying the purpose of the visualization, instead of explicit execution of algorithms on the input data. The declarative component will be specified by collected user statistics from completing certain perceptual or cognitive tasks. The user statistics will be analyzed for systematic trends in human perceptual and cognitive performance. These trends will form a basis for visualization redesign. Final satisfactory visualization will evolve over several design iterations.", event = "SCCG 2013", location = "Smolenice castle, Slovakia", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Viola_Ivan_2013_DC/", } @bachelorsthesis{schwankl-2013-smis, title = "Splitting of Meshes in Image-Space", author = "Barbara Schwankl", year = "2013", abstract = "Representing complex 3D data is no problem with modern technologies. The challenge is to reveal data that is concealed by solid geometry and retaining its context at the same time. In this thesis, several approaches are presented that deal with this problem by finding user-centered solutions that are adjusted for each individual requirement. Moreover, a simple algorithm is proposed that combines existing approaches to reveal occluded structures. 
Therefore, a descriptive implementation of this algorithm is shown with VolumeShop, an application that flexibly supports visualization research.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/schwankl-2013-smis/", } @talk{mindek-2013-cs_cvut, title = "Contextual Snapshots: Enriched Visualization with Interactive Spatial Annotations", author = "Peter Mindek", year = "2013", event = "Scientific meeting of Department of Computer Graphics and Interaction", location = "Czech Technical University in Prague", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/mindek-2013-cs_cvut/", } @talk{musialski-2013-prag, title = "Facade Reconstruction: An Interactive Image-Based Approach", author = "Przemyslaw Musialski", year = "2013", abstract = "Modeling and reconstruction of urban environments is currently the subject of intensive research. In this talk I will present one specific subfield of urban reconstruction: interactive image-based facade reconstruction. In particular I will introduce insights into image-based interactive approaches which aim at the decomposition of facade imagery in order to generate well defined geometric models.", event = "Invited Talk at Czech Technical University in Prague", location = "Czech Technical University in Prague, Department of Computer Graphics and Interaction, Prague", keywords = "facade reconstruction, urban reconstruction", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/musialski-2013-prag/", } @talk{Purgathofer-2013-fhg, title = "Visual Computing Research inspired by Applications", author = "Werner Purgathofer", year = "2013", abstract = "Almost all areas of life can be improved with Visual Computing techniques. Research, however, often focusses on assumed problems, guessed by academic researchers. In the research center VRVis, founded 13 years ago to transfer academic knowledge to industry, research topics are motivated and steered by real world needs, leading to real solutions for practical purposes. The talk will present several such research projects, and explore some skills these researchers need and have developed to fulfill this challenging task.", event = "Science Meets Business, FhG", location = "Graz", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Purgathofer-2013-fhg/", } @studentproject{zauner-2013-svkinect, title = "Smartvis with Kinect Support", author = "Ulrike Zauner", year = "2013", abstract = "We want to enable the user to explore volume data in a novel way. Using the Kinect camera, we obtain depth information that is then used to calculate a cutting plane. Moving objects in front of the camera, e.g. a sheet of paper or ones hands, back and forth, the cutting plane is moved through the volume data, creating an intuitive way for exploration. 
Additional parameters are exposed in the GUI to modify depth information.", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/zauner-2013-svkinect/", } @talk{Groeller_Edi_2013TVCK, title = "Trends in Visual Computing", author = "Eduard Gr\"{o}ller", year = "2013", event = "Conference on Graphics, Patterns and Images", location = "Arequipa, Peru", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Groeller_Edi_2013TVCK/", } @talk{Groeller_Edi_2013_TVC, title = "Trends in Visual Computing", author = "Eduard Gr\"{o}ller", year = "2013", event = "CD-adapco Customer Advisory Council Meeting", location = "Vienna, Austria", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Groeller_Edi_2013_TVC/", } @talk{Groeller_Edi_2013_VCQ, title = "Visual Computing - Quo Vadis?", author = "Eduard Gr\"{o}ller", year = "2013", event = "CS-Colloquium of the Faculty of Computer Science", location = "University of Vienna", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Groeller_Edi_2013_VCQ/", } @talk{Purgathofer-2013-cvws, title = "Accurate Fast Simulation of Light", author = "Werner Purgathofer", year = "2013", abstract = "Light distribution in a scene is a very complex issue if it is to be close to realism. For a lamp-producing and light-planning company such as Zumtobel it is of great value to be able to design installations interactively, providing immediate feedback to the customers about the final result. Many aspects such as reflections and indirect lighting make this task difficult. This talk will give some ideas of how a project at the research center VRVis approaches this topic, and which algorithms are useful for this. Where can we simplify without visible loss? How can we use the GPU to speed up the calculations? Why are virtual lights an efficient concept? Some example images provide evidence. ", event = "The 18th Computer Vision Winter Workshop", location = "Hernstein, Austria", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Purgathofer-2013-cvws/", } @bachelorsthesis{brenner_simon-2013-ba, title = "Neighbor detection in point clouds using the Boundary Complex", author = "Simon Brenner", year = "2013", abstract = "For many applications (e.g. robotic vision) it is important to separate the point clouds produced by a 3D scanner into logically associated clusters in order to recognize objects or surfaces. A crucial part of most of the existing segmentation approaches is the definition of neighborhood between points. This thesis describes and evaluates the use of the ‘Boundary Complex’, a spanning connected set of triangles on the point cloud, for neighbor detection in the context of cloud segmentation. The results will be compared to existing neighbor picking approaches.", month = feb, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "segmentation, neighbor detection, clustering, boundary complex, point cloud", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/brenner_simon-2013-ba/", } @bachelorsthesis{Streicher-bachelor-2013, title = "Interactive Scene Manipulation Techniques for Ray Tracing", author = "Kevin Streicher", year = "2013", abstract = "'Ray tracing is the future and will ever be'. This was the title of the ray tracing course at SIGGRAPH 2013, which shows how important a field of research ray tracing currently is. 
The most important reason ray tracing or path tracing has not yet replaced rasterization is the long computation time. While ray and path tracing have already replaced rasterization for offline rendering in general, we still rarely see them in real-time applications. With many promising results presented in recent years, we can expect ray tracing to become more popular in the coming years. In interactive applications, fast response times are needed to create smooth and usable tools. Many different factors influence the final rendering time, such as the number of refractive and reflective objects, the number of pixels covered by those objects, and the number of objects in general. We have developed a basic scene designing tool using ray tracing and have benchmarked different code styles and the impact of several of these parameters on the final render time. We also present a simple technique to determine which regions require re-rendering when changes are introduced to the scene, allowing a considerable amount of computation time to be saved. When using scene designing tools, it is usually desirable, for certain features, to trade artifacts, higher noise levels or reduced image quality for faster render times during interaction. In this thesis we propose different options for interactive scene designing and present our benchmark results as well as the implementation of our scene designing tool.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "real-time, ray tracing", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Streicher-bachelor-2013/", } @studentproject{schuetz_markus-2013-pra, title = "Real-time Consistent Meshing", author = "Markus Sch\"{u}tz", year = "2013", abstract = "Create a mesh for a point cloud by constructing near-consistent umbrellas at points. Near-consistency is ensured by determining the umbrella with lexically minimum edges. The first goal is to find this umbrella with very few evaluations and, if necessary, with high probability. The second task is to implement the entire process on the GPU and to display the consistent umbrellas.", keywords = "nearest neighbors, meshing, real-time, delaunay triangulation", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/schuetz_markus-2013-pra/", } @studentproject{laager_florian-2013-camr, title = "Camera Artifacts in Mixed Reality", author = "Florian Laager", year = "2013", abstract = "Simulating camera artifacts for better immersion of virtual objects in a real environment", keywords = "Mixed Reality, Camera Artifacts", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/laager_florian-2013-camr/", } @bachelorsthesis{hafner_christian-2013-oflow, title = "Image to Geometry Projection", author = "Christian Hafner", year = "2013", abstract = "The projection of images onto a 3D model is a widely applicable way to acquire appearance information of an object. The first step of this procedure is the alignment of the images on the 3D model. While any reconstruction pipeline aims at avoiding misregistration by improving camera calibrations and geometry, remaining errors show up either as ghosting or as discontinuities at transitions from one camera view to another. 
Correcting the local misalignment by determining the necessary displacement offers a solution to the aforementioned artifacts.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "optical flow", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/hafner_christian-2013-oflow/", } @mastersthesis{winklhofer_christoph-2013-RRMR, title = "Reflections, Refractions and Caustics in a Mixed-Reality Environment", author = "Christoph Winklhofer", year = "2013", abstract = "In a mixed-reality environment, virtual objects are merged into a real scene. Such an augmentation with virtual objects offers great possibilities to present content in new and innovative ways. The visual appearance of these virtual objects depends on a plausible lighting simulation. Otherwise, virtual objects look artificial and out of place, which destroys the overall impression of the perceived scene. Reflective and refractive objects are an inherent part of our physical environment. Accordingly, virtual objects of this type also enhance the overall impression and scope of a mixed-reality application. Many mixed-reality systems still neglect them: Such objects require a complex light simulation that is hard to embed in a mixed-reality system, which demands real-time frame rates to handle the user interaction. This thesis describes the integration of reflective and refractive objects in a mixed-reality environment. The aim is to create a realistic light distribution that simulates reflections and refractions between real and virtual objects. Another important aspect for a believable perception is caustics: light focused due to scattering from reflective or refractive objects. Until recently, this effect was simply excluded from the lighting simulation of mixed-reality systems. The proposed rendering method extends differential instant radiosity with three other image-space rendering techniques capable of handling reflections, refractions and caustics in real time. By combining these techniques, our method successfully simulates the various lighting effects from reflective and refractive objects and is able to handle user interactions at interactive to real-time frame rates. This offers a practicable possibility to greatly improve the visual quality of a mixed-reality environment.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Caustics, Reflections, Mixed Reality, Refractions", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/winklhofer_christoph-2013-RRMR/", }