@phdthesis{miao_thesis_2019, title = "Geometric Abstraction for Effective Visualization and Modeling", author = "Haichao Miao", year = "2019", abstract = "In this cumulative thesis, I describe geometric abstraction as a strategy to create an integrated visualization system for spatial scientific data. The proposed approach creates a multitude of representations of spatial data in two dominant ways: along the spatiality axis, it gradually removes spatial details, and along the visual detail axis, the features are increasingly aggregated and represented by different visual objects. These representations are then integrated into a conceptual abstraction space that enables users to efficiently change the representation and adjust the abstraction level to the task at hand. To enable the expert to perceive the correspondence between these representations, controllable animated transitions are provided. Finally, the abstraction space can record user interactions and provides visual indications to guide the expert towards interesting representations for a particular task and data set. Mental models of the experts play a crucial role in the understanding of the abstract representations and are considered in the design of the visualization system to keep the cognitive load on the user’s side low. This approach is demonstrated in two distinct fields: placenta research and the in silico design of DNA nanostructures. In both fields, geometric abstraction facilitates effective visual inspection and modeling. The Adenita toolkit, a software tool for the design of novel DNA nanostructures, implements the proposed visualization concepts. This toolkit, together with the proposed visualization concepts, is currently deployed to several research groups to help them in nanotechnology research.", month = aug, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/miao_thesis_2019/", }
@phdthesis{birsak-thesis, title = "Discrete Optimization on Graphs and Grids for the Creation of Navigational and Artistic Imagery", author = "Michael Birsak", year = "2018", month = sep, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/birsak-thesis/", }
@phdthesis{Muehlbacher_diss_2018, title = "Human-Oriented Statistical Modeling: Making Algorithms Accessible through Interactive Visualization", author = "Thomas M\"{u}hlbacher", year = "2018", abstract = "Statistical modeling is a key technology for generating business value from data. While the number of available algorithms and the need for them are growing, the number of people with the skills to effectively use such methods lags behind. Many application domain experts find it hard to use and trust algorithms that come as black boxes with insufficient interfaces to adapt them. The field of Visual Analytics aims to solve this problem by a human-oriented approach that puts users in control of algorithms through interactive visual interfaces. However, designing accessible solutions for a broad set of users while re-using existing, proven algorithms poses significant challenges for the design of analytical infrastructures, visualizations, and interactions.
This thesis provides multiple contributions towards a more human-oriented modeling process: As a theoretical basis, it investigates how user involvement during the execution of algorithms can be realized from a technical perspective. Based on a characterization of needs regarding intermediate feedback and control, a set of formal strategies to realize user involvement in algorithms with different characteristics is presented. Guidelines for the design of algorithmic APIs are identified, and requirements for the re-use of algorithms are discussed. From a survey of frequently used algorithms within R, the thesis concludes that a range of pragmatic options for enabling user involvement in new and existing algorithms exists and should be used. After these conceptual considerations, the thesis presents two methodological contributions that demonstrate how even inexperienced modelers can be effectively involved in the modeling process. First, a new technique called TreePOD guides the selection of decision trees along trade-offs between accuracy and other objectives, such as interpretability. Users can interactively explore a diverse set of candidate models generated by sampling the parameters of tree construction algorithms. Visualizations provide an overview of possible tree characteristics and guide model selection, while details on the underlying machine learning process are only exposed on demand. Real-world evaluation with domain experts in the energy sector suggests that TreePOD enables users with and without a statistical background to confidently identify suitable decision trees. As the second methodological contribution, the thesis presents a framework for interactive building and validation of regression models. The framework addresses limitations of automated regression algorithms regarding the incorporation of domain knowledge, the identification of local dependencies, and the building of trust in the models. Candidate variables for model refinement are ranked, and their relationship with the target variable is visualized to support an interactive workflow of building regression models. A real-world case study and feedback from domain experts in the energy sector indicate a significant effort reduction and increased transparency of the modeling process. All methodological contributions of this work were implemented as part of a commercially distributed Visual Analytics software called Visplore. As the last contribution, this thesis reflects upon years of experience in deploying Visplore for modeling-related tasks in the energy sector. Dissemination and adoption are important aspects of making statistical models more accessible for domain experts, making this work relevant for practitioners and application-oriented researchers alike.", month = aug, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/Muehlbacher_diss_2018/", }
@phdthesis{schwaerzler_2018_phd, title = "Advances in the Multimodal 3D Reconstruction and Modeling of Buildings", author = "Michael Schw\"{a}rzler", year = "2018", abstract = "Driven by the need for faster and more efficient workflows in the digitization of urban environments, the availability of affordable 3D data-acquisition systems for buildings has drastically increased in recent years: Laser scanners and photogrammetric methods both produce millions of 3D points within minutes of acquisition time.
They are applied both at street level and from above using drones, and are used to enhance traditional tachymetric measurements in surveying. However, these 3D data points are not the only available information: Metadata extracted from images, simulation results (e.g., from light simulations), 2D floor plans, and semantic tags – especially from the upcoming Building Information Modeling (BIM) systems – are becoming increasingly important. The challenges this multimodality poses during the reconstruction of CAD-ready 3D buildings are manifold: Apart from handling the enormous size of the data that is collected during the acquisition steps, the different data sources must also be registered to each other in order to be applicable in a common context – which can be difficult in the case of missing or erroneous information. Nevertheless, the potential for improving both the workflow efficiency and the quality of the reconstruction results is huge: Missing information can be substituted by data from other sources, information about spatial or semantic relations can be utilized to overcome limitations, and interactive modeling complexity can be reduced (e.g., by limiting interactions to a two-dimensional space). In this thesis, four publications are presented which aim at providing freely combinable “building blocks” for the creation of helpful methods and tools for advancing the field of Multimodal Urban Reconstruction. First, efficient methods for the calculation of shadows cast by area light sources are presented – one with a focus on the most efficient generation of physically accurate penumbras, and the other one with the goal of reusing soft shadow information in consecutive frames to avoid costly recalculations. Then, a novel, optimization-supported reconstruction and modeling tool is presented, which employs sketch-based interactions and snapping techniques to create watertight 3D building models. An extension to this system is demonstrated subsequently: There, 2D photos act as the only interaction canvas for the simple, sketch-based creation of building geometry and the corresponding textures.
Together, these methods form a solid foundation for the creation of common, multimodal environments targeted at the reconstruction of 3D building models.", month = jun, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/schwaerzler_2018_phd/", }
@phdthesis{waldin-2017-thesis, title = "Using and Adapting to Limits of Human Perception in Visualization", author = "Nicholas Waldin", year = "2017", month = nov, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/waldin-2017-thesis/", }
@phdthesis{preiner_2017_phd, title = "Dynamic and Probabilistic Point-Cloud Processing", author = "Reinhold Preiner", year = "2017", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/preiner_2017_phd/", }
@phdthesis{sorger-2017-thesis, title = "Integration Strategies in the Visualization of Multifaceted Spatial Data", author = "Johannes Sorger", year = "2017", abstract = "Visualization designers have several visual channels at their disposal for encoding data into visual representations, e.g., position, size, shape, orientation, color, texture, brightness, and motion. The mapping of attributes to visual channels can be chosen by the designer. In theory, any data attribute can be represented by any of these visual channels or by a combination of several of them. In practice, the optimal mapping and the most suitable type of visualization strongly depend on the data as well as on the user's task. In the visualization of spatial data, the mapping of spatial attributes to visual channels is inherently given by the data. Multifaceted spatial data possesses a wide range of additional (non-spatial) attributes without a given mapping. The data's given spatial context is often important for successfully fulfilling a task. The design space in spatial data visualization can therefore be heavily constrained when trying to choose an optimal mapping for other attributes within the spatial context. To solve an exploration or presentation task in the domain of multifaceted spatial data, special strategies have to be employed in order to integrate the essential information from the various data facets in an appropriate representation form with the spatial context. This thesis explores visualization integration strategies for multifaceted spatial data. The first part of this thesis describes the design space of integration in terms of two aspects: visual and functional integration. Visual integration describes how representations of the different data facets can be visually composed within a spatial context. Functional integration describes how events that have been triggered, for instance, through user interaction, can be coordinated across the various visually integrated representations. The second part of this thesis describes contributions to the field of visualization in the context of concrete integration applications for exploration and presentation scenarios.
The first scenario addresses a set of challenges in the exploratory analysis of multifaceted spatial data in the scope of a decision-making scenario in lighting design. The user's task is to find an optimal lighting solution among dozens or even hundreds of potential candidates. In the scope of a design study, the challenges in lighting design are addressed with LiteVis, a system that integrates representations of the simulation parameter space with representations of all relevant aspects of the simulation output. The integration of these heterogeneous aspects, together with a novel ranking visualization, is thereby the key to enabling an efficient exploration and comparison of lighting parametrizations. In presentation scenarios, the generation of insights often cannot rely on user interaction and therefore needs a different approach. The challenge is to generate visually appealing, yet information-rich representations for mainly passive observation. In this context, this thesis addresses two different challenges in the domain of molecular visualization. The first challenge concerns conveying the relations between two different representations of a molecular data set, such as a virus. The relation is established via animated transitions – a temporal form of integration between two representations. The proposed solution features a novel technique for creating such transitions that are re-usable for different data sets, and can be combined in a modular fashion. Another challenge in presentation scenarios of multifaceted spatial data concerns the presentation of the transition between development states of molecular models, where the actual biochemical process of the transition is not exactly known or is too complex to represent. A novel technique applies a continuous abstraction of both model representations to a level of detail at which the relationship between them can be accurately conveyed, in order to avoid suggesting false relationships. Integration thereby brings the different abstraction levels and the different model states into relation with each other. The results of this thesis clearly demonstrate that integration is a versatile tool in overcoming key challenges in the visualization of multifaceted spatial data.", month = sep, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/sorger-2017-thesis/", }
@phdthesis{LeMuzic_2016_PhD, title = "From Atoms to Cells: Interactive and Illustrative Visualization of Digitally Reproduced Lifeforms", author = "Mathieu Le Muzic", year = "2016", abstract = "Macromolecules, such as proteins, are the building blocks of the machinery of life, and therefore are essential to the comprehension of physiological processes. In physiology, illustrations and animations are often utilized as a means of communication because they can easily be understood with little background knowledge. However, their realization requires numerous months of manual work, which is both expensive and time-consuming. Computational biology experts produce large amounts of publicly available data every day, containing valuable information about both the structure and the function of these macromolecules.
Instead of relying on manual work to generate illustrative visualizations of cell biology, we envision a solution that would utilize all the data already available in order to streamline the creation process. In this thesis, several contributions are presented that aim at enabling our vision. First, a novel GPU-based rendering pipeline that allows interactive visualization of realistic molecular datasets comprising up to hundreds of millions of macromolecules. The rendering pipeline is embedded into a popular game engine, and well-known computer graphics optimizations, such as level of detail, instancing, and occlusion queries, were adapted to support this type of data. Secondly, a new method for authoring cutaway views and improving spatial exploration of crowded molecular landscapes. The system relies on the use of clipping objects that are manually placed in the scene and on visibility equalizers that allow fine-tuning of the visibility of each species present in the scene. Agent-based modeling produces trajectory data that can also be combined with structural information in order to animate these landscapes. The snapshots of the trajectories are often played in fast-forward to shorten the length of the visualized sequences, which also renders potentially interesting events occurring at a higher temporal resolution invisible. The third contribution is a solution to visualize time-lapses of agent-based simulations that also reveals hidden information that is only observable at higher temporal resolutions. And finally, a new type of particle system that utilizes quantitative models as input and generates missing spatial information to enable the visualization of molecular trajectories and interactions. The particle system produces visual output similar to that of traditional agent-based modeling tools at a much lower computational footprint and allows interactive changing of the simulation parameters, which was not achievable with previous methods.", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/LeMuzic_2016_PhD/", }
@phdthesis{schmidt-phd, title = "Scalable Comparative Visualization", author = "Johanna Schmidt", year = "2016", abstract = "The comparison of two or more objects is becoming an increasingly important task in data analysis. Visualization systems successively have to move from representing one phenomenon to allowing users to analyze several datasets at once. Visualization systems can support the users in several ways. Firstly, comparison tasks can be supported in a very intuitive way by allowing users to place objects that should be compared in an appropriate context. Secondly, visualization systems can explicitly compute differences among the datasets and present the results to the user. In comparative visualization, researchers are working on new approaches for computer-supported techniques that provide data comparison functionality. Techniques from this research field can be used to compare two objects with each other, but often reach their limits if a multitude of objects (e.g., 100 or more) have to be compared. Large data collections that contain many individual, but related, datasets with slightly different characteristics can be called ensembles. The individual datasets being part of an ensemble are called the ensemble members.
Ensembles have been created in the simulation domain, especially for weather and climate research, for quite some time already. These domains have greatly driven the development of ensemble visualization techniques. Due to the availability of affordable computing resources and the multitude of different analysis algorithms (e.g., for segmentation), other domains nowadays also face similar problems. Altogether, this shows a great need for ensemble visualization techniques in various domains. Ensembles can be analyzed either in a feature-based or in a location-based way. In the case of a location-based analysis, the ensemble members are compared based on certain spatial data positions of interest. For such an analysis, local selection and analysis techniques for ensembles are needed. In the course of this thesis, different visual analytics techniques for the comparative visualization of datasets have been researched. A special focus has been set on providing scalable techniques, which also makes them suitable for ensemble datasets. The proposed techniques operate on different dataset types in 2D and 3D. In the first part of the thesis, a visual analytics approach for the analysis of 2D image datasets is introduced. The technique analyzes localized differences in 2D images. The approach not only identifies differences in the data, but also provides a technique to quickly find out what the differences are, and to judge the underlying data. This way, patterns can be found in the data, and outliers can be identified very quickly. As a second part of the thesis, a scalable application for the comparison of several similar 3D mesh datasets is described. Such meshes may be, for example, created by point-cloud reconstruction algorithms, using different parameter settings. Similar to the proposed technique for the comparison of 2D images, this application is also scalable to a large number of individual datasets. The application enables the automatic comparison of the meshes, searches for interesting regions in the data, and allows users to also concentrate on local regions of interest. The analysis of the local regions is in this case done in 3D. The application provides the possibility to arrange local regions in a parallel coordinates plot. The regions are represented by the axes in the plot, and the input meshes are depicted as polylines. This way, it can be quickly spotted whether meshes produce good or bad results in a certain local region. In the third and last part of the thesis, a technique for the interactive analysis of local regions in a volume ensemble dataset is introduced. Users can pick regions of interest, and these regions can be arranged in a graph according to their similarity. The graph can then be used to detect similar regions with a similar data distribution within the ensemble, and to compare individual ensemble members against the rest of the ensemble. All proposed techniques and applications have been tested with real-world datasets from different domains.
The results clearly show the usefulness of the techniques for the comparative analysis of ensembles.", month = jun, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/schmidt-phd/", }
@phdthesis{karimov-2016-GIVE, title = "Guided Interactive Volume Editing in Medicine", author = "Alexey Karimov", year = "2016", abstract = "Various medical imaging techniques, such as Computed Tomography, Magnetic Resonance Imaging, and Ultrasonic Imaging, are now gold standards in the diagnosis of different diseases. The diagnostic process can be greatly improved with the aid of automatic and interactive analysis tools, which, however, require certain prerequisites in order to operate. Such analysis tools can, for example, be used for pathology assessment, various standardized measurements, treatment and operation planning. One of the major requirements of such tools is the segmentation mask of an object-of-interest. However, the segmentation of medical data remains subject to errors. Often, physicians have to manually inspect and correct the segmentation results, as (semi-)automatic techniques do not immediately achieve the required quality. Consequently, interactive segmentation editing is an integral part of medical image processing and visualization. In this thesis, we present three advanced segmentation-editing techniques. They are focused on simple interaction operations that allow the user to edit segmentation masks quickly and effectively. These operations are based on a topology-aware representation that captures structural features of the segmentation mask of the object-of-interest. Firstly, in order to streamline the correction process, we classify segmentation defects according to underlying structural features and propose a correction procedure for each type of defect. This relieves users of manually applying the proper editing operations, but the segmentation defects still have to be located by users. Secondly, we extend the basic editing process by detecting regions that potentially contain defects. With subsequently suggested correction scenarios, users are hereby immediately able to correct a specific defect, instead of manually searching for defects beforehand. For each suggested correction scenario, we automatically determine the corresponding region of the respective defect in the segmentation mask and propose a suitable correction operation. In order to create the correction scenarios, we detect dissimilarities within the data values of the mask and then classify them according to the characteristics of a certain type of defect. Potential findings are presented with a glyph-based visualization that enables users to interactively explore the suggested correction scenarios at different levels of detail. As a consequence, our approach even offers users the possibility to fine-tune the chosen correction scenario instead of directly manipulating the segmentation mask, which is a time-consuming and cumbersome task. Third and finally, we guide users through the multitude of suggested correction scenarios of the entire correction process. After statistically evaluating all suggested correction scenarios, we rank them according to the significance of their dissimilarities, offering fine-grained editing capabilities at a user-specified level of detail.
As we visually convey this ranking in a radial layout, users can easily spot and select the most (or the least) dissimilar correction scenario, which improves the segmentation mask the most towards the desired result. All techniques proposed within this thesis have been evaluated by collaborating radiologists. We assessed the usability, interaction aspects, the accuracy of the results, and the time expenditure of the entire correction process. The outcome of the assessment showed that our guided volume editing not only leads to acceptable segmentation results with only a few interaction steps, but is also applicable to various application scenarios.", month = jun, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/karimov-2016-GIVE/", }
@phdthesis{Smisek_Michal_A3D, title = "Analysis of 3D and 4D Images of Organisms in Embryogenesis", author = "Michal Sm\'{i}\v{s}ek", year = "2015", abstract = "In this work, we present a few modifications to state-of-the-art algorithms, as well as several novel approaches, related to the detection of cells in biological image processing. We start by explaining a PDE-based image processing evolution called FBLSCD and studying its properties. We then define a fully automatic way of finding the stop time for this evolution. Afterwards, we try to see the FBLSCD as a morphological grayscale erosion, and we formulate a novel cell detection algorithm, called LSOpen, as an intersection of the PDE-based and morphological image processing schools. Then, we discuss the best ways of inspecting cell detection results, i.e., cell identifiers. We try to quantitatively benchmark various cell detection methods by the relative amount of false positives, false negatives, and multiply-detected centers yielded. We observe that comparing cell detection results in a binary fashion is insufficient; therefore, we utilize the concept of a distance function. Motivated by this need for robust comparison of cell detection results, we analyze commonly used methods for computing the distance function, and afterwards we formulate a novel algorithm. This algorithm has complexity O(n log_2 n) and yields the Euclidean distance. In addition, we introduce a modification to this algorithm, enabling it to also work in maze-like environments containing walls and corners. This modification relies on the line rasterization algorithm. We perform various experiments to study and compare distance function methods. The results illustrate the viability of the newly proposed method. Further, SliceViewer, a software tool for comparing and inspecting cell detection results, is specified, designed, implemented, and tested. In the end, quantitative experiments are discussed, validating the above-mentioned novelties.", month = aug, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Smisek_Michal_A3D/", }
@phdthesis{Reh_Andreas_VoP, title = "Visualization of Porosity in Carbon Fiber Reinforced Polymers", author = "Andreas Reh", year = "2015", abstract = "Industrial research is continuously increasing its efforts in designing new tailored lightweight materials in order to meet the high demands regarding efficiency, environment, safety, and comfort.
Especially in the aeronautics industry, a high demand for advanced composite materials is observable. The new generations of aircraft consist of more than 50 % of these novel composite materials. Carbon fiber reinforced polymers (CFRPs) are currently considered the most promising candidate, since this material outperforms the majority of conventional materials. As a result of the manufacturing process, this material tends to have pores inside. Pores in the material are typically inclusions of air. As they have an impact on the mechanical properties of the component, their determination and evaluation is an important task in quality control and a particular challenge for non-destructive testing (NDT) practitioners. Besides the characterization of individual pores, their spatial distribution in the tested component is a relevant factor. For example, a high concentration of pores in certain regions leads to different material characteristics as compared to a homogeneous distribution of the pores. This work is based on 3D X-ray Computed Tomography (XCT) to gain new insight into CFRP components. Based on domain experts’ questions, specific tasks were derived. Besides the quantitative porosity determination, the main visualization tasks are: giving a fast porosity overview, exploring the individual pores, and tracking features over time based on XCT time-series. In this thesis, three novel visual analysis tools are presented to solve these tasks. To enhance the evaluation workflow for NDT practitioners, a visualization pipeline for the interactive exploration and visual analysis of CFRP specimens is developed. After the calculation of local pore properties, i.e., volume, surface, extents, and shape factors, a drill-down approach is employed to explore pores in a CFRP specimen. Therefore, Porosity Maps (PM) are presented to allow for a fast porosity overview and the selection of a region of interest. Pores in this region may be filtered and visualized with a parallel-coordinates selection. Furthermore, a novel visualization technique is proposed which allows for a fast porosity overview and an exploration of pores by focusing more on their shapes. In this method, all objects (pores) are clustered into a Mean Object (MObject). To explore this MObject, the visualization of mean object sets (MObject Sets) in a radial and a parallel alignment is introduced. By selecting a specific property, such as the volume or shape factor, and the desired number of classes, an MObject is split up into sub-classes. With this approach, intended classifications and visualizations of MObjects may be explored by the user. These representative MObjects may be exported as volumetric datasets to serve as input for successive calculations and simulations. For an overview of the pore properties in the dataset, local MObjects are calculated in a grid and combined with a color-coded homogeneity visualization. Both approaches were evaluated with real-world CFRP specimens. To go one step further, time is added as a fourth dimension to analyze a process over time, e.g., how the features evolve and form over time. Therefore, features in a series of XCT scans are tracked with the Fuzzy Feature Tracking approach and are then visualized together with the extracted events in multiple linked views, each emphasizing individual aspects of the 4D time-series data.
The visual-analysis system covers spatial feature information, a global temporal overview, and the global temporal evolution of how the features are tracked and connected over the whole time-series. The results and advantages of the Fuzzy Feature Tracking tool are demonstrated using various real-world applications, such as AlSiC alloys under thermal load or wood shrinkage analyses.", month = mar, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", keywords = "Carbon Fiber Reinforced Polymers, MObjects, Porosity, Visual Analysis, Visualization, Industrial Computed Tomography", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Reh_Andreas_VoP/", }
@phdthesis{mindek-thesis, title = "Interactive Integrated Exploration and Management of Visualization Parameters", author = "Peter Mindek", year = "2015", abstract = "Visualization algorithms are parameterized to offer universality in terms of handling various data types, showing different aspects of the visualized data, or producing results useful for domain experts from different fields. Hence, input parameters are an important aspect of the visualization process. Their exploration and management are tasks which enable visualization reusability, portability, and interdisciplinary communication. With the increasing availability of visualization systems suitable for a great variety of tasks, their complexity increases as well. This usually involves many input parameters necessary for the meaningful visualization of data. Multiple input parameters form parameter spaces which are too large to be explored by brute force. Knowing the properties of a parameter space is often beneficial for improving data visualization. Therefore, it is important for domain experts utilizing data visualization to have tools for automatic parameter specification and for aiding manual parameter setting. In this thesis, we review existing approaches for parameter-space visualization, exploration, and management. These approaches are used with a great variety of underlying algorithms. We focus on their applicability to visualization algorithms. We propose three methods solving specific problems arising from the fact that the output of a visualization algorithm is an image, which is challenging to process automatically and often needs to be analyzed by a human. First, we propose a method for the exploration of parameter spaces of visualization algorithms. The method is used to understand the effects of combinations of parameters and parts of the internal structure of the visualization algorithms on the final image result. The exploration is carried out by specifying semantics for localized parts of the visualization images in the form of positive and negative examples influenced by a set of input parameters or parts of the visualization algorithm itself. After specifying the localized semantics, global effects of the specified components of the visualization algorithm can be observed. The method itself is independent of the underlying algorithm. Subsequently, we present a method for managing image-space selections in visualizations and automatically linking them with the context in which they were created. The context is described by the values of the visualization parameters influencing the output image.
The method contains a mechanism for linking additional views to the selections, allowing the user to effectively manage the visualization parameters whose effects are localized to certain areas of the visualizations. We present various applications for the method, as well as an implementation in the form of a library, which is ready to be used in existing visualization systems. Our third method is designed to integrate dynamic parameters stored during a multiplayer video game session by the individual participating players. For each player, the changing parameter values of the game describe their view of the gameplay. Integrating these multiple views into a single continuous visual narrative provides a means for the effective summarization of gameplays, useful for entertainment, or even for gameplay analysis by semi-professional or professional players. We demonstrate the utility of our approach on an existing video game by producing a gameplay summary of a multiplayer game session. The proposed method opens possibilities for further research in the areas of storytelling or, at a more abstract level, parameter integration for visual computing algorithms.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/mindek-thesis/", }
@phdthesis{auzinger-2015-phd, title = "Sampled and Prefiltered Anti-Aliasing on Parallel Hardware", author = "Thomas Auzinger", year = "2015", abstract = "A fundamental task in computer graphics is the generation of two-dimensional images. Prominent examples are the conversion of text or three-dimensional scenes to formats that can be presented on a raster display. Such a conversion process – often referred to as rasterization or sampling – is subject to inherent limitations due to the nature of the output format. This not only causes a loss of information in the rasterization result, which manifests as reduced image sharpness, but also corrupts the retained information in the form of aliasing artifacts. Commonly observed examples in the final image are staircase artifacts along object silhouettes or Moiré-like patterns. The main focus of this thesis is on the effective removal of such artifacts – a process that is generally referred to as anti-aliasing. This is achieved by removing the offending input information in a filtering step during rasterization. In this thesis, we present different approaches that either minimize computational effort or emphasize output quality. We follow the former objective in the context of an applied scenario from medical visualization. There, we support the investigation of the interiors of blood vessels in complex arrangements by allowing for unrestricted view orientation. Occlusions of overlapping blood vessels are minimized by automatically generating cut-aways with the help of an occlusion cost function. Furthermore, we allow for suitable extensions of the vessel cuts into the surrounding tissue. Utilizing a level-of-detail approach, these cuts are gradually smoothed with increasing distance from their respective vessels. Since interactive response is a strong requirement for a medical application, we employ fast sample-based anti-aliasing methods in the form of visibility sampling, shading supersampling, and post-process filtering.
We then take a step back and develop the theoretical foundations for anti-aliasing methods that follow the second objective of providing the highest degree of output quality. As the main contribution in this context, we present exact anti-aliasing in the form of prefiltering. By computing closed-form solutions of the filter convolution integrals in the continuous domain, we circumvent any issues that are caused by numerical integration and provide mathematically correct results. Together with a parallel hidden-surface elimination, which removes all occluded object parts when rasterizing three-dimensional scenes, we present a ground-truth solution for this setting with exact anti-aliasing. We allow for complex illumination models and perspective-correct shading by combining visibility prefiltering with shading sampling, and generate a ground-truth solution for multisampling anti-aliasing. All our aforementioned methods exhibit highly parallel workloads. Throughout the thesis, we present their mapping to massively parallel hardware architectures in the form of graphics processing units. Since our approaches do not map to conventional graphics pipelines, we implement them using general-purpose computing concepts. This results in decreased runtime of our methods and makes all of them interactive.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/auzinger-2015-phd/", }
@phdthesis{Bernhard_Matthias_2014_VAC, title = "Visual Attention in Computer Graphics", author = "Matthias Bernhard", year = "2014", abstract = "This thesis is concerned with gaze analysis methods to study visual attention in interactive 3D computer-graphics applications, such as virtual environments or computer games. Under this scope, research has been carried out in two directions: On the one hand, it was investigated how gaze analysis in three-dimensional virtual environments can be advanced. On the other hand, approaches were explored which improve three-dimensional graphics by taking into account the visual attention of a user. To advance gaze analysis in 3D computer graphics applications, two challenges have been addressed: First, inferring the object of attention at a certain point in time from the current output of an eye tracker – a technique which we denote as gaze-to-object mapping – and second, deriving a statistical model for visual attention – a data structure we denote as an importance map – from sequences of gaze samples recorded from many users. While addressing these challenges is a crucial step towards advancing gaze analysis and research on visual attention which employs modern computer graphics, the results may also be used in applications which attempt to perceptually optimize rendering. Thus, the third challenge addressed in this thesis was to explore an example application for attention-aware rendering techniques, where gaze-to-object mapping or importance maps can be employed to determine or predict the object of attention at run time.
The thesis therefore concludes with a pilot study on an application that dynamically adjusts the configuration of a stereo 3D display such that the object being attended to by the user can be seen most comfortably.", month = dec, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Bernhard_Matthias_2014_VAC/", }
@phdthesis{Guerrero_Paul_2014_EPG, title = "Edit Propagation using Geometric Analogies", author = "Paul Guerrero", year = "2014", abstract = "Modeling complex geometrical shapes, like city scenes or terrains with dense vegetation, is a time-consuming task that cannot be automated trivially. The problem of creating and editing many similar, but not identical, models requires specialized methods that understand what makes these objects similar in order to either create new variations of these models from scratch or to propagate edit operations from one object to all similar objects. In this thesis, we present new methods to significantly reduce the effort required to model complex scenes. For 2D scenes containing deformable objects, such as fish or snakes, we present a method to find partial matches between deformed shapes that can be used to transfer localized properties such as texture between matching shapes. Shapes are considered similar if they are related by pointwise correspondences and if neighboring points have correspondences with similar transformation parameters. Unlike previous work, this approach allows us to successfully establish matches between strongly deformed objects, even in the presence of occlusions and sparse or unevenly distributed sets of matching features. For scenes consisting of 2D shape arrangements, such as floor plans, we propose methods to find similar locations in the arrangements, even though the arrangements themselves are dissimilar. Edit operations, such as object placements, can be propagated between similar locations. Our approach is based on simple geometric relationships between the location and the shape arrangement, such as the distance of the location to a shape boundary or the direction to the closest shape corner. Two locations are similar if they have many similar relations to their surrounding shape arrangement. To the best of our knowledge, there is no method that explicitly attempts to find similar locations in dissimilar shape arrangements. We demonstrate populating large scenes such as floor plans with hundreds of objects like pieces of furniture, using relatively few edit operations. Additionally, we show that providing several examples of an edit operation helps narrow down the supposed modeling intention of the user and improves the quality of the edit propagation. A probabilistic model is learned from the examples and used to suggest similar edit operations. Also, extensions are shown that allow the application of this method to 3D scenes. Compared to previous approaches that use entire scenes as examples, our method provides more user control and has no need for large databases of example scenes or domain-specific knowledge.
We demonstrate generating 3D interior decoration and complex city scenes, including buildings with detailed facades, using only a few edit operations.", month = nov, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Guerrero_Paul_2014_EPG/", }
@phdthesis{scheiblauer-thesis, title = "Interactions with Gigantic Point Clouds", author = "Claus Scheiblauer", year = "2014", abstract = "During the last decade, the increased use of laser range scanners for sampling the environment has led to gigantic point cloud data sets. Due to the size of such data sets, tasks like viewing, editing, or presenting the data have become a challenge per se, as the point data is too large to fit completely into the main memory of a customary computer system. In order to accomplish these tasks and enable the interaction with gigantic point clouds on consumer-grade computer systems, this thesis presents novel methods and data structures for efficiently dealing with point cloud data sets consisting of more than 10^9 point samples. To be able to quickly access point samples that are stored on disk or in memory, they have to be spatially ordered, and for this a data structure is proposed which organizes the point samples in a level-of-detail hierarchy. Point samples stored in this hierarchy can not only be rendered fast, but can also be edited; for example, existing points can be deleted from the hierarchy or new points can be inserted. Furthermore, the data structure is memory efficient, as it only uses the point samples from the original data set. Therefore, the memory consumption of the point samples on disk, when stored in this data structure, is comparable to the original data set. A second data structure is proposed for selecting points. This data structure describes a volume inside which point samples are considered to be selected, and this has the advantage that the information about a selection does not have to be stored at the point samples. In addition to these two previously mentioned data structures, which represent novel contributions for point data visualization and manipulation, methods for supporting the presentation of point data sets are proposed. With these methods the user experience can be enhanced when navigating through the data. One possibility to do this is by using regional meshes that employ an out-of-core texturing method to show details at the mesoscopic scale on the surface of sampled objects, and which are displayed together with point clouds. Another possibility to increase the user experience is to use graphs in 3D space, which help users to orient themselves inside point cloud models of large sites, where otherwise it would be difficult to find the places of interest. Furthermore, the quality of the displayed point cloud models can be increased by using a point size heuristic that can mimic a closed surface in areas that would otherwise appear undersampled, by utilizing the density of the rendered points in the different areas of the point cloud model. Finally, the use of point cloud models as a tool for archaeological work is proposed. Since it is becoming increasingly common to document archaeologically interesting monuments with laser scanners, the number of application areas of the resulting point clouds is rising as well.
These include, but are not limited to, new views of the monument that are impossible when studying the monument on-site, creating cuts and floor plans, or performing virtual anastylosis. All these previously mentioned methods and data structures are implemented in a single software application that has been developed during the course of this thesis and can be used to interactively explore gigantic point clouds.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", keywords = "point-based rendering, out-of-core rendering, data structures, complexity analysis", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/scheiblauer-thesis/", }
@phdthesis{knecht_2013_RSM, title = "Reciprocal Shading for Mixed Reality", author = "Martin Knecht", year = "2013", abstract = "Reciprocal shading for mixed reality aims to integrate virtual objects into real environments in such a way that they are, in the ideal case, indistinguishable from real objects. It is therefore an attractive technology for architectural visualizations, product visualizations, and cultural heritage sites, where virtual objects should be seamlessly merged with real ones. Due to the improved performance of recent graphics hardware, real-time global illumination algorithms are feasible for mixed-reality applications, and thus more and more researchers address realistic rendering for mixed reality. The goal of this thesis is to provide algorithms which improve the visual plausibility of virtual objects in mixed-reality applications. Our contributions are as follows: First, we present five methods to reconstruct the real surrounding environment. In particular, we present two methods for geometry reconstruction, a method for material estimation at interactive frame rates, and two methods to reconstruct the color mapping characteristics of the video see-through camera. Second, we present two methods to improve the visual appearance of virtual objects. The first, called differential instant radiosity, combines differential rendering with a global illumination method called instant radiosity to simulate reciprocal shading effects such as shadowing and indirect illumination between real and virtual objects. The second method focuses on the visually plausible rendering of reflective and refractive objects. The high-frequency lighting effects caused by these objects are also simulated with our method. The third part of this thesis presents two user studies which evaluate the influence of the presented rendering methods on human perception. The first user study measured task performance with respect to the rendering mode, and the second user study was set up as a web survey where participants had to choose which of two presented images, showing mixed-reality scenes, they preferred.", month = dec, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/knecht_2013_RSM/", }
@phdthesis{mistelbauer_2013_SIV, title = "Smart Interactive Vessel Visualization in Radiology", author = "Gabriel Mistelbauer", year = "2013", abstract = "Cardiovascular diseases occur with increasing frequency in our society. Their diagnosis often requires tailored visualization techniques, e.g., to examine the blood flow channel in the case of luminal narrowing.
Curved Planar Reformation (CPR) addresses this field by creating longitudinal sections along the centerline of blood vessels. With the possibility to rotate around an axis, the entire vessel can be assessed for possible vascular abnormalities (e.g., calcifications on the vessel wall, stenoses, and occlusions). In this thesis, we present a visualization technique, called Centerline Reformation (CR), that offers the possibility to investigate the interior of any blood vessel, regardless of its spatial orientation. Starting from the projected vessel centerlines, the lumen of any vessel is generated by employing wavefront propagation in image space. The vessel lumen can optionally be delineated by halos to enhance spatial relationships when examining a dense vasculature. We present our method in a focus+context setup by rendering a different kind of visualization around the lumen. We explain how to resolve the correct visibility of multiple overlapping vessels in image space. Additionally, our visualization method allows the examination of a complex vasculature by means of interactive vessel filtering and subsequent visual querying. We propose an improved version of the Centerline Reformation technique by generating a completely three-dimensional reformation of vascular structures using ray casting. We call this process Curved Surface Reformation (CSR). In this method, the cut surface is smoothly extended into the surrounding tissue of the blood vessels. Moreover, automatically generated cutaways reveal as much of the vessel lumen as possible, while still retaining correct visibility. This technique offers unrestricted navigation within the inspected vasculature and allows diagnosis of any tubular structure, regardless of its spatial orientation. The growing amount of data requires increasing knowledge from users in order to select the appropriate visualization method for their analysis. In this thesis, we present an approach that externalizes the knowledge of domain experts in a human-readable form and employs an inference system to provide only suitable visualization techniques for clinical diagnosis, namely Smart Super Views. We discuss the visual representation of such automatically suggested visualizations by encoding their respective relevance into the shape and size of their views. By providing a smart spatial arrangement and integration, the image becomes the menu itself. Such a system offers medical diagnosis guided by domain experts. After presenting the approach in a general setting, we describe an application scenario for diagnostic vascular visualization techniques. Since vascular structures usually consist of many vessels, we describe an anatomical layout for the investigation of the peripheral vasculature of the human lower extremities. By aggregating the volumetric information around the vessel centerlines in a circular fashion, we provide only a single static image for the assessment of the vessels. We call this method Curvicircular Feature Aggregation (CFA). In addition, we describe a stability analysis of the local deviations of vessel centerlines to determine potentially imprecise definitions. By conveying this information in the visualization, a fast visual analysis of the centerline stability is feasible.
", month = nov, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/mistelbauer_2013_SIV/", }
@phdthesis{Reisner_Irene_2013_R3D, title = "Reconstruction of 3D Models from Images and Point Clouds with Shape Primitives", author = "Irene Reisner-Kollmann", year = "2013", abstract = "3D models are widely used in different applications, including computer games, planning software, applications for training and simulation, and virtual city maps. For many of these applications it is necessary, or at least advantageous, if the virtual 3D models are based on real-world scenes and objects. Manual modeling is reserved for experts as it requires extensive skills. For this reason, it is necessary to provide automatic or semi-automatic, easy-to-use techniques for reconstructing 3D objects. In this thesis we present methods for reconstructing 3D models of man-made scenes. These scenes can often be approximated with a set of geometric primitives, like planes or cylinders. Using geometric primitives leads to lightweight, low-poly 3D models, which are beneficial for efficient storage and post-processing. The applicability of reconstruction algorithms highly depends on the existing input data, the characteristics of the captured objects, and the desired properties of the reconstructed 3D model. For this reason, we present three algorithms that use different input data. It is possible to reconstruct 3D models from just a few photographs or to use a dense point cloud as input. Furthermore, we present techniques to combine information from both images and point clouds. The image-based reconstruction method is especially designed for environments with homogeneous and reflective surfaces where it is difficult to acquire reliable point sets. Therefore we use an interactive application which requires user input. Shape primitives are fit to user-defined segmentations in two or more images. Our point-based algorithms, on the other hand, provide fully automatic reconstructions. Nevertheless, the automatic computations can be enhanced by manual user inputs for generating improved results. The first point-based algorithm is specialized in reconstructing 3D models of buildings and uses unstructured point clouds as input. The point cloud is segmented into planar regions and converted into 3D geometry. The second point-based algorithm additionally supports the reconstruction of interior scenes. While unstructured point clouds are supported as well, this algorithm specifically exploits the redundancy and visibility information provided by a set of range images. The data is automatically segmented into geometric primitives. Then the shape boundaries are extracted either automatically or interactively.", month = feb, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Reisner_Irene_2013_R3D/", }
@phdthesis{Konyha_2013_IVA, title = "Interactive Visual Analysis in Automotive Engineering Design", author = "Zoltan Konyha", year = "2013", abstract = "Computational simulation has become instrumental in the design process in automotive engineering. Virtually all components and subsystems of automobiles can be simulated.
The simulation can be repeated many times with varied parameter settings, thereby simulating many possible design choices. Each simulation run can produce a complex, multivariate, and usually time-dependent result data set. The engineers’ goal is to generate useful knowledge from those data. They need to understand the system’s behavior, find correlations in the results, conclude how results depend on the parameters, find optimal parameter combinations, and exclude the ones that lead to undesired results. Computational analysis methods are widely used and necessary to analyze simulation data sets, but they are not always sufficient. They typically require that problems and interesting data features can be precisely defined from the beginning. The results of automated analysis of complex problems may be difficult to interpret. Exploring trends, patterns, relations, and dependencies in time-dependent data through statistical aggregates is not always intuitive. In this thesis, we propose techniques and methods for the interactive visual analysis (IVA) of simulation data sets. Compared to computational methods, IVA offers new and different analysis opportunities. Visual analysis utilizes human cognition and creativity, and can also incorporate the experts’ domain knowledge. Therefore, their insight into the data can be amplified, and also less precisely defined problems can be solved. We introduce a data model that effectively represents the multi-run, time-dependent simulation results as families of function graphs. This concept is central to the thesis, and many of the innovations in this thesis are closely related to it. We present visualization techniques for families of function graphs. Those visualizations, as well as well-known information visualization plots, are integrated into a coordinated multiple views framework. All views provide focus+context visualization. Compositions of brushes spanning several views can be defined iteratively to select interesting features and promote information drill-down. Valuable insight into the spatial aspect of the data can be gained from (generally domain-specific) spatio-temporal visualizations. In this thesis, we propose interactive, glyph-based 3D visualization techniques for the analysis of rigid and elastic multibody system simulations. We integrate the on-demand computation of derived data attributes of families of function graphs into the analysis workflow. This facilitates the selection of deeply hidden data features that cannot be specified by combinations of simple brushes on the original data attributes. The combination of these building blocks supports interactive knowledge discovery. The analyst can build a mental model of the system; also explore unexpected features and relations; and generate, verify, or reject hypotheses with visual tools; thereby gaining more insight into the data. Complex tasks, such as parameter sensitivity analysis and optimization, can be solved. Although the primary motivation for our work was the analysis of simulation data sets in automotive engineering, we learned that this data model and the analysis procedures we identified are also applicable to several other problem domains. We discuss common tasks in the analysis of data containing families of function graphs. Two case studies demonstrate that the proposed approach is indeed applicable to the analysis of simulation data sets in automotive engineering. 
Some of the contributions of this thesis have been integrated into a commercially distributed software suite for engineers. This suggests that their impact can extend beyond the visualization research community.", month = jan, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Konyha_2013_IVA/", } @phdthesis{Schulze_Florian_2013_CMI, title = "Computational Methods enabling Interactivity in Analysis and Exploration of Volumetric Images", author = "Florian Schulze", year = "2013", abstract = "Volumetric imaging is widely used in medicine and the life sciences, allowing insight to be gained into otherwise opaque objects. Volumetric images do not unveil their content in a direct way due to their spatial structure. Therefore, a variety of computational methods are used for visualization and processing that allow users to explore and analyze the data. Analysis and exploration of the data is usually performed in an interactive way, either manually or with the support of semi-automatic algorithms. It is crucial for an efficient completion of the task that the system performs interactively and responsively. Thus, software supporting the user in an effective way relies on three basic requirements. First, the system must deliver feedback in a short period of time. Second, results of any computation must be presented or visualized in a way that the user can efficiently recognize the important information. Third, the user must be able to efficiently control, initialize or adjust the algorithm through a suitable user interface. In this thesis, four approaches are presented which aim to solve different aspects of the problem of enabling interactivity in analysis and exploration of volumetric image data. The first presented project studies the design of an application which has strict limitations concerning the user interface due to the application environment, which requires almost hands-free interaction. The problem is approached by the development of an efficient and robust visualization which makes adjustments unnecessary, and by the development of sophisticated interaction patterns which reduce the needed interface to a minimum. The second project focuses on methods which optimize a computationally intensive feature detection task so that it can be used in an interactive scenario which requires the algorithm to produce results in just a few seconds. To achieve this goal, the probabilistic boosting tree classification algorithm is extended and optimized for runtime and memory efficiency. The third and the fourth project focus on the interactive exploration of large image and object collections. Two approaches are presented for this problem area. For the retrieval of neuronal objects by similarity, measures for different neuronal substructures have been developed which are able to perform efficiently on large amounts of data. For retrieval of images and objects by local means such as neighborhood, overlap, local image expression, and local image similarity, a sophisticated, updatable high-performance index structure has been developed. The index allows local properties of volumetric data to be stored in a space-efficient way and retrieved with low latency. The presented projects demonstrate that the challenge of achieving interactivity often lies in the development of methods that balance processing speed with result quality. 
Furthermore, it is shown that time performance is not the only property which needs to be respected; result presentation and interaction patterns deserve similar attention and contribute greatly to an interactive user experience.", month = jan, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Schulze_Florian_2013_CMI/", } @phdthesis{amirkhanov-2012-thesis, title = "Visualization of Industrial 3DXCT Data", author = "Artem Amirkhanov", year = "2012", abstract = "Three-dimensional X-ray computed tomography (3DXCT) is a powerful technique for generating a digital 3D volumetric representation of a specimen from a series of 2D X-ray penetration images. The main advantage of 3DXCT is its ability to detect both the interior and the exterior structure of a specimen in a single scan. Having been used in medical diagnostics for a long time, 3DXCT is increasingly employed in industry as a method for nondestructive testing and quality control. One especially challenging industrial application is metrology, which has to fulfill the demands of today’s standards in industrial quality control. 3DXCT facilitates dimensional measurements of internal structures and of inaccessible parts of a component. However, the successful industrial application of 3DXCT is constrained by a set of major problems: Artifacts: Industrial 3DXCT systems face problems due to various types of artifacts. The appearance of artifacts in the 3DXCT scan data distorts its correlation to the actual evaluated industrial object and can lead to errors in measurements and false analysis results. Some types of artifacts are affected by the placement of a specimen in the scanning device. Multi-material components: Another problem occurs when multi-material components (MMCs) are inspected using industrial 3DXCT. Common industrial MMCs may contain metal parts surrounded by plastic materials. A major problem with this type of component is the presence of metal-caused streaking artifacts and distortions. They are located around metal components and significantly influence the material characterization. Furthermore, these streaking artifacts and distortions may even prevent any further analysis (especially for the plastic components). Measurement uncertainty: If metrology using 3DXCT is performed, the location of the specimen surface is estimated using the reconstructed 3D volume data. As opposed to mechanical or optical measurement techniques, the surface is not explicit and has a particular positional uncertainty depending on the artifacts and noise in the scan data and the surface extraction algorithm. Conventional CT metrology software does not account for the uncertainty of the data. This thesis is devoted to the development of techniques overcoming the aforementioned problems of common industrial tasks involving the usage of 3DXCT for nondestructive testing and quality control, with a main focus on industrial 3DXCT metrology. 
Several novel contributions utilizing visualization techniques and visual analysis methods were implemented in integrated tools assisting typical industrial 3DXCT tasks during different stages of the data pipeline.", month = nov, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/amirkhanov-2012-thesis/", } @phdthesis{varchola_andrej-2012-fetoscopic, title = "Live Fetoscopic Visualization of 4D Ultrasound Data", author = "Andrej Varchola", year = "2012", abstract = "Ultrasound (US) imaging is, due to its real-time character, low cost, non-invasive nature, high availability, and many other factors, considered a standard diagnostic procedure during pregnancy. The quality of diagnostics depends on many factors, including scanning protocol, data characteristics, and visualization algorithms. In this work, several problems of ultrasound data visualization for obstetric ultrasound imaging are discussed and addressed. The capability of ultrasound scanners is growing, and modern ultrasound devices produce large amounts of data that have to be processed in real time. An ultrasound imaging system is, in a broad sense, a pipeline of several operations and visualization algorithms. Individual algorithms are usually organized in modules that separately process the data. In order to achieve the required level of detail and high-quality images with the visualization pipeline, we had to address the flow of large amounts of data on modern computer hardware with limited capacity. We developed a novel visualization pipeline architecture for ultrasound imaging. This visualization pipeline combines several algorithms, which are described in this work, into an integrated system. In the context of this pipeline, we advocate slice-based streaming as a possible approach to the large data flow problem. Live examination of the moving fetus from ultrasound data is a challenging task which requires extensive knowledge of the fetal anatomy and proficient operation of the ultrasound machine. The fetus is typically occluded by structures which hamper the view in 3D rendered images. We developed a novel method of visualizing the human fetus for prenatal sonography from 3D/4D ultrasound data. It is a fully automatic method that can recognize and render the fetus without occlusion, where the highest priority is to achieve an unobstructed view of the fetal face. Our smart visibility method for prenatal ultrasound is based on a ray analysis performed within image-based direct volume rendering (DVR). It automatically calculates a clipping surface that removes the uninteresting structures and uncovers the interesting structures of the fetal anatomy behind. The method is able to work with the data streamed on-the-fly from the ultrasound transducer and to visualize a temporal sequence of reconstructed ultrasound data in real time. It has the potential to minimize the interaction of the operator and to improve the comfort of patients by decreasing the investigation time. This can lead to increased confidence in the prenatal diagnosis with 3D ultrasound and eventually decrease the costs of the investigation. Ultrasound scanning is very popular among parents who are interested in the health condition of their fetus during pregnancy. Parents usually want to keep the ultrasound images as a memory for the future. 
Furthermore, convincing images are important for the confident communication of findings between clinicians and parents. Current ultrasound devices offer advanced imaging capabilities, but common visualization methods for volumetric data only provide limited visual fidelity. The standard methods render only images with a plastic-like appearance which do not correspond to natural-looking fetuses. This is partly due to the dynamic and noisy nature of the data, which limits the applicability of standard volume visualization techniques. In this thesis, we present a fetoscopic rendering method which aims to reproduce the quality of fetoscopic examinations (i.e., physical endoscopy of the uterus) from 4D sonography data. Based on the requirements of domain experts and the constraints of live ultrasound imaging, we developed a method for high-quality rendering of prenatal examinations. We employ a realistic illumination model which supports shadows, movable light sources, and realistic rendering of the human skin to provide an immersive experience for physicians and parents alike. Beyond aesthetic aspects, the resulting visualizations also have promising diagnostic applications. The presented fetoscopic rendering method has been successfully integrated into the state-of-the-art ultrasound imaging systems of GE Healthcare as the HDlive imaging tool. It is used daily in many prenatal imaging centers around the world.", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "ultrasound, volume rendering, medical imaging", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/varchola_andrej-2012-fetoscopic/", } @phdthesis{ohrhallinger-stefan-2012-the, title = "The Intrinsic Shape of Point Clouds", author = "Stefan Ohrhallinger", year = "2012", abstract = "Given a point cloud, in the form of unorganized points, the problem of automatically connecting the dots to obtain an aesthetically pleasing and piecewise-linear closed interpolating boundary shape has been extensively researched for over three decades. In R^3, it is even more complicated to find an aesthetic closed oriented surface. Most previous methods for shape reconstruction exclusively from coordinates work well only when the point spacing on the shape boundary is dense and locally uniform. The problem of shape construction from non-dense and locally non-uniformly spaced point sets is, in our opinion, not yet satisfactorily solved. Various extensions to earlier methods do not work that well and do not provide any performance guarantees either. Our main thesis in this research is that a point set, even with non-dense and locally non-uniform spacing, has an intrinsic shape which optimizes in some way the Gestalt principles of form perception. This shape can be formally defined as the minimum of an energy function over all possible closed piecewise-linear interpolations of this point set. Further, while finding this optimal shape is NP-hard, it is possible to heuristically search for an acceptable approximation within reasonable time. Our minimization objective is guided by Gestalt’s laws of Proximity, Good Continuity and Closure. Minimizing curvature tends to satisfy proximity and good continuity. For computational simplification, we globally minimize the longest-edge-in-simplex, since it is intrinsic to a single facet and also a factor in mean curvature. And we require a closed shape. 
Using such an intrinsic criterion permits the extraction of an approximate shape with a linearithmic algorithm as a simplicial complex, which we have named the Minimum Boundary Complex. Experiments show that it seems to be a very close approximation to the desired boundary shape and that it retains its genus. Further, it can be constructed locally and can also handle sensor data with significant noise. Its quick construction is due to not being restricted by the manifold property, which is required of the boundary shape. Therefore, it has many applications where a manifold shape is not necessary, e.g., visualization, shape retrieval, shadow mapping, and topological data analysis in higher dimensions. The definition of the Minimum Boundary Complex is our first major contribution. Our next two contributions include new methods for constructing boundary shapes by transforming the boundary complex into a close approximation of the minimum boundary shape. These algorithms vary a topological constraint to first inflate the boundary complex to recover a manifold hull and then sculpt it to extract a Minimum Boundary approximation, which interpolates all the points. In the R^3 method, we show how local minima can be avoided by covering holes in the hull. Finally, we apply a mesh fairing step to optimize mean curvature directly. We present results for shape construction in R^2 and R^3, which clearly demonstrate that our methods work better than the best-performing earlier methods for non-dense and locally non-uniformly spaced point sets, while maintaining competitive linearithmic complexity. ", month = jul, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Surface Reconstruction, Manifold Reconstruction, Point Cloud, Shape Boundary, Gestalt, Curve Reconstruction", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/ohrhallinger-stefan-2012-the/", } @phdthesis{muigg-2012-svr, title = "Scalability for Volume Rendering and Information Visualization Approaches in the Context of Scientific Data", author = "Philipp Muigg", year = "2012", abstract = "Data from numerical simulations that model physical processes have to be explored and analyzed in a broad range of different fields of research and development. Besides data mining and statistics, visualization is among the most important methods that grant domain experts insight into their complex simulation results. In order to keep up with ongoing improvements of simulation methods as well as ever-increasing amounts of data, state-of-the-art visualization techniques have to be scalable with respect to many different properties. Many numerical models rely on a domain decomposition defined by a volumetric grid. Finer grids yield more accurate simulation results at the cost of longer computing times. The wide availability of high-performance computing resources has resulted in increasingly detailed data sets. The first volume rendering approach that is presented in this thesis uses bricking and resampling to cope with such high-resolution data. Important regions of the simulated volume are visualized in as much detail as possible, whereas lower-resolution representations are used for less important portions of a data set. This allows for interactive frame rates even when dealing with the highly detailed grids that are used by state-of-the-art simulation models. 
Grid resolution, however, is only one aspect that has increased due to the ongoing development of numerical methods. Grid complexity has increased as well. While initial simulation techniques required simple tetrahedral meshes, current methods can cope with polyhedral cells that allow for increased solver efficiency and simulation accuracy. The second volume visualization algorithm that is presented in this thesis is scalable with respect to grid complexity, since it is capable of directly visualizing data defined on grids which comprise polyhedral cells. Ray casting is performed by using a novel data structure that allows for easy grid traversal while retaining a very compact memory footprint. Both aforementioned volume rendering techniques utilize the massively parallel computing resources that are provided by modern graphics processing units. Many information visualization methods are designed to explore and analyze abstract data that is often high-dimensional. Since improvements in the field of numerical modeling have led to simulation data sets that contain a large number of physical attributes, the application of techniques from the field of information visualization can provide additional important information to domain experts. However, in order to apply information visualization methods to scientific data such as numerical simulation results, additional scalability issues have to be addressed. This thesis introduces multiple methods that can be used to reduce cluttering and overdrawing problems for line-based techniques such as phase-space diagrams, parallel coordinates, and a novel time-series visualization. The trajectories of important trends in the data are illustrated by blurring a noise texture along them. A novel coloring scheme is used to provide visual linking information across multiple visualizations in a multi-view framework. The proposed approaches are primarily image-based, which makes them very scalable with respect to data set sizes. The usefulness and real-world applicability of the techniques that are introduced in this thesis are demonstrated in a case study. A complex computational fluid dynamics data set, which contains several simulated breathing cycles within the human upper respiratory tract, is analyzed. The exploration of the data has yielded several hypotheses that are of importance to an ENT specialist. Many of the techniques presented in this work have also been used in the context of additional collaborations in a multitude of fields such as medicine, climatology, meteorology, and engineering.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/muigg-2012-svr/", } @phdthesis{Gavrilescu_2011_VGP, title = "Visualization and Graphical Processing of Volume Data", author = "Marius Gavrilescu", year = "2011", abstract = "The extraction and visualization of information from volume data constitute important research avenues in computer graphics and imaging. The rapid development of GPUs with increasing computational power has made it possible to generate intuitive, three-dimensional representations of data sets, which can be tweaked and manipulated interactively. This thesis presents various techniques developed within the field of volume graphics. These have wide applicability in the generation of meaningful images from mainly CT and MRI data sets. 
The work addresses multiple aspects of volume visualization and rendering, such as the representation, classification, and in-depth graphical analysis of the information contained within volume data. Initially, we present generic information on the nature of volume data, the mathematical and physical models behind volume rendering, as well as the rendering algorithms used within our prototyping framework for the rendering of images. Subsequently, we address the problem of volume classification, where we explore the use of various types of transfer functions. These operate on voxel properties such as the gradient, curvature, or visibility, allowing for the isolation of increasingly complex and problematic features. We provide alternative, more computationally efficient ways of approximating some of these properties and show how they can be used for classification purposes. We also provide an effective way of specifying multidimensional transfer functions from 1D components, thus increasing the flexibility and expanding the potential of the classification process. Another part of the thesis deals with cardiac MRI data. Specifically, we develop a tool for the visual inspection of parameters which influence the status and functionality of the left ventricle. The considered parameters are the thickness and thickening of the myocardial wall, the moment of maximum thickness, and the average speed of the wall during a cardiac cycle. Starting from segmentation contours which outline the epicardium and endocardium, we construct surfaces and use these to visualize the distribution of parameter values using color coding. The technique allows information from multiple slices, over multiple phases and stress levels, to be represented on a single 3D geometry, therefore facilitating the analysis of multidimensional data sets comprising a large number of slices. The values of the cardiac parameters are depicted in an intuitive manner, making them easily accessible to both medical staff and patients with no medical training. In the last part of the thesis, we develop a method for the analysis of parameters involved in the volume rendering pipeline. The technique involves sampling the parameters across their domains, rendering images for each sample, and computing the differences among these images. The resulting values characterize the behavior and stability of the parameters across their domains. These values are further used to augment various user interfaces, such as sliders or transfer function specification widgets. The newly modified interfaces use color coding, graphs, arrows, and other info-vis techniques to show the potential changes induced by the parameters in images resulting from volume rendering, thus allowing users to make better-informed decisions when adjusting parameter values. 
On the other hand, the unprecedented amount of available data bears an enormous potential for supporting decision-making. Turning data into comprehensible knowledge is thus a key challenge of the 21st century. The power of the human visual system makes visualization an appropriate method to comprehend large data. In particular, interactive visualization enables a discourse between the human brain and the data that can transform a cognitive problem into a perceptual one. However, the visual analysis of large and complex datasets involves both visual and computational challenges. Visual limits involve perceptual and cognitive limitations of the user and restrictions of the display devices, while computational limits are related to the computational complexity of the involved algorithms. The goal of this thesis is to advance the state of the art in visual analysis with respect to the scalability to large datasets. Due to the multifaceted nature of scalability, the contributions span a broad range to enhance computational scalability, to improve the visual scalability of selected visualization approaches, and to support an analysis of high-dimensional data. Concerning computational scalability, this thesis describes a generic architecture to facilitate the development of highly interactive visual analysis tools using multi-threading. The architecture builds on the separation of the main application thread and dedicated visualization threads, which can be cancelled early due to user interaction. A quantitative evaluation shows fast visual feedback during continuous interaction even for millions of entries. Two variants of scatterplots address the visual scalability of different types of data and tasks. For continuous data, a combination of 2D and 3D scatterplots intends to combine the advantages of 2D interaction and 3D visualization. Several extensions improve the depth perception in 3D and address the problem of unrecognizable point densities in both 2D and 3D. For partly categorical data, the thesis contributes Hierarchical Difference Scatterplots to relate multiple hierarchy levels and to explicitly visualize differences between them in the context of the absolute position of pivoted values. While comparisons in Hierarchical Difference Scatterplots are only qualitative, this thesis also contributes an approach for quantifying subsets of the data by means of statistical moments for a potentially large number of dimensions. This approach has proven useful as an initial overview as well as for a quantitative comparison of local features like clusters. As an important application of visual analysis, the validation of regression models also involves the scalability to multi-dimensional data. This thesis describes a design study of an approach called HyperMoVal for this task. The key idea is to visually relate n-dimensional scalar functions to known validation data within a combined visualization. The integration with other multivariate views is a step towards a user-centric workflow for model building. Being the result of collaboration with experts in engine design, HyperMoVal demonstrates how visual analysis can significantly improve real-world tasks. Positive user feedback suggests that the contributions of this thesis have a high impact also outside the visualization research community. 
Moreover, most contributions of this thesis have been combined in a commercially distributed software framework for engineering applications that will hopefully raise awareness and promote the use of visual analysis in multiple application domains.", month = may, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "high dimensionality, Visualization, Scalability, Interaction, Data analysis, multi-threading, scatter plots", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/PH-2011-LDS/", } @phdthesis{waser_2011_VSD, title = "Visual Steering to Support Decision Making in Visdom", author = "J\"{u}rgen Waser", year = "2011", abstract = "Computer simulation has become a ubiquitous tool to investigate the nature of systems. When steering a simulation, users modify parameters to study their impact on the simulation outcome. The ability to test alternative options provides the basis for interactive decision making. Increasingly complex simulations are characterized by an intricate interplay of many heterogeneous input and output parameters. A steering concept that combines simulation and visualization within a single, comprehensive system is largely missing. This thesis targets the basic components of a novel integrated steering system called Visdom to support the user in the decision-making process. The proposed techniques enable users to examine alternative scenarios without the need for special simulation expertise. To accomplish this, we propose World Lines as a management strategy for multiple, related simulation runs. In a dedicated view, users create and navigate through many simulation runs. New decisions are included through the concept of branching. To account for uncertain knowledge about the input parameters, we provide the ability to cover full parameter distributions. Via multiple cursors, users navigate a system of multiple linked views through time and alternative scenarios. In this way, the system supports comparative visual analysis of many simulation runs. Since the steering process generates a huge amount of information, we employ the machine to support the user in the search for explanations inside the computed data. Visdom is built on top of a data-flow network to provide a high level of modularity. A decoupled meta-flow is in charge of transmitting parameter changes from World Lines to the affected data-flow nodes. To direct the user's attention to the most relevant parts, we provide dynamic visualization inside the flow diagram. The usefulness of the presented approach is substantiated through case studies in the field of flood management. 
The Visdom application enables the design of a breach closure by dropping sandbags in a virtual environment.", month = may, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "CFD, Data-Flow, Simulation Steering, Comparative Visual Analysis, Multiple Simulation Runs, Problem Solving Environment, Hypothesis Generation, Uncertainty, Flood Management", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/waser_2011_VSD/", } @phdthesis{haidacher-2011-phd, title = "Information-based Feature Enhancement in Scientific Visualization", author = "Martin Haidacher", year = "2011", abstract = "Scientific visualization is a research area which gives insight into volumetric data acquired through measurement or simulation. The visualization allows a faster and more intuitive exploration of the data. Due to the rapid development in hardware for the measurement and simulation of scientific data, the size and complexity of data is constantly increasing. This has the benefit that it is possible to get a more accurate insight into the measured or simulated phenomena. A drawback of the increasing data size and complexity is the problem of generating an expressive representation of the data. Since only certain parts of the data are necessary to make a decision, it is possible to mask parts of the data along the visualization pipeline to enhance only those parts which are important in the visualization. For the masking, various properties are extracted from the data which are used to classify a part as important or not. In general, a transfer function, which has to be designed by the user, is used for this classification process. In this thesis, three novel approaches are presented which use methods from information theory and statistics to enhance features from the data in the classification process that are important for a certain task. With the tools of information theory and statistics, it is possible to extract properties from the data which are able to classify different materials or tissues in the data better than other comparable approaches. One approach adaptively extracts statistical properties, i.e., the mean value and the standard deviation, of the data values in the local neighborhood of each point in the data set. With these statistical properties, it is possible to better distinguish between different materials in a data set even though the data is very noisy. The other two approaches in this thesis employ methods from information theory to extract features from multimodal data sets. Thus, it is possible to enhance features of the data which are either very similar or very dissimilar in both modalities. Through information theory, the variations in the value ranges of both modalities do not influence the classification of these features. All three approaches define novel transfer-function spaces which simplify the design process of a transfer function for the user. Different features of the data, such as different materials, can be clearly depicted in these spaces. Therefore, it is easier for a user to design a transfer function which enhances the features of importance for a certain task. For each of the new approaches, results and comparisons to other existing techniques are shown to highlight the usefulness of the proposed methods. 
Through the described research, it is shown that information theory and statistics are tools which are able to extract expressive properties from the data. In the introduction, a broad overview of scientific visualization and the visualization pipeline is given. The classification process is described in more detail. Since information theory and statistics play an important role in all three approaches, a brief introduction to these concepts is given as well.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Scientific visualization, Information theory, Volume classification", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/haidacher-2011-phd/", } @phdthesis{musialski-2010-pfi, title = "Processing of Fa\c{c}ade Imagery", author = "Przemyslaw Musialski", year = "2010", abstract = "Modeling and reconstruction of urban environments is currently the subject of intensive research. There is a wide range of possible applications, including virtual environments like cyber-tourism, computer games, and the entertainment industries in general, as well as urban planning and architecture, security planning and training, traffic simulation, driving guidance, and telecommunications, to name but a few. The research directions are spread across the disciplines of computer vision, computer graphics, image processing, photogrammetry and remote sensing, as well as architecture and the geosciences. Reconstruction is a complex problem and requires an entire pipeline of different tasks. In this thesis, we focus on the processing of fa\c{c}ade images, which is one specific subarea of urban reconstruction. The goal of our research is to provide novel algorithmic solutions for problems in fa\c{c}ade imagery processing. In particular, the contribution of this thesis is the following: First, we introduce a system for the generation of approximate orthogonal fa\c{c}ade images. The method is a combination of automatic and interactive tools in order to provide a convenient way to generate high-quality results. The second problem addressed in this thesis is fa\c{c}ade image segmentation. In particular, by segmentation we usually mean the subdivision of the fa\c{c}ade into windows and other architectural elements. We address this topic with two different algorithms for the detection of grids over the fa\c{c}ade image. Finally, we introduce one more fa\c{c}ade processing algorithm, this time with the goal of improving the quality of the fa\c{c}ade appearance. The algorithm propagates visual information across the image in order to remove potential obstacles and occluding objects. The output is intended as a source for textures in urban reconstruction projects. The construction of large three-dimensional urban environments itself is beyond the scope of this thesis. 
However, we propose a suite of tools together with mathematical foundations that contribute to the state of the art and provide helpful building blocks important for large-scale urban reconstruction projects.", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "facade processing, urban reconstruction, image processing", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/musialski-2010-pfi/", } @phdthesis{balabanian-2009-mav, title = "Multi-Aspect Visualization: Going from Linked Views to Integrated Views", author = "Jean-Paul Balabanian", year = "2010", abstract = "This thesis delves into the matter of visualization integration. There are many approaches to visualizing volume data, and often several of these approaches can appropriately be used at the same time to visualize different aspects. The usual way is to visualize these aspects separately in different views, but integrating the visualizations into the same view can often be the superior approach. We describe the two most used approaches to visualizing several aspects at the same time: linked views and integrated views. We describe some approaches to creating integrated visualizations by showing where in the visualization pipeline the integration takes place. We present work produced by the author describing the integrated visualizations developed.", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/balabanian-2009-mav/", } @phdthesis{lipp_markus-2010-DAC, title = "Direct Artist Control for Procedural Content Generation of Urban Environments", author = "Markus Lipp", year = "2010", abstract = "Creating 3D digital assets of urban environments is a challenging task, requiring a significant amount of manual labor. To automate parts of this process, many procedural modeling methods to automatically create buildings, plants, or entire cities have been introduced. The main advantage of such methods compared to manual methods is the ability to create large amounts of assets using just a few parameters as input data. However, the main disadvantage is the difficulty of controlling or predicting the output of such methods. Direct controllability is especially important for artists, enabling them to model the output to their vision or requirements. Therefore, the main goal of this thesis is to combine the direct control provided by manual methods with the power of procedural modeling. To achieve this, several new methods and paradigms bringing direct and visual artist control to the procedural generation of urban environments are contributed in this thesis. These include a method enabling a visual design process for building grammars, as well as methods providing direct artist control for architecture generation algorithms. To model whole cities, we introduce a layering system for urban layouts based on graph cut, contributing the ability to perform direct and persistent changes to a procedurally generated city. Additionally, the concept of anchored assignments is introduced, enabling direct control of parameter distributions on cities. 
Finally, as real-time performance of generation algorithms is paramount if they are to be used in a production setting, we introduce an algorithm able to parallelize the work of L-system generation across thousands of processors.", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Procedural Modeling, Urban Layouts, Interactive Control", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/lipp_markus-2010-DAC/", } @phdthesis{Mattausch-2010-vcr, title = "Visibility Computations for Real-Time Rendering in General 3D Environments", author = "Oliver Mattausch", year = "2010", abstract = "Visibility computations are essential operations in computer graphics, which are required for rendering acceleration in the form of visibility culling, as well as for computing realistic lighting. Visibility culling, which is the main focus of this thesis, aims to provide output sensitivity by sending only visible primitives to the hardware. Regardless of the rapid development of graphics hardware, it is of crucial importance for many applications like game development or architectural design, as the demands on the hardware regarding scene complexity increase accordingly. Solving the visibility problem has been an important research topic for many years, and countless methods have been proposed. Interestingly, there are still open research problems up to this day, and many algorithms are either impractical or only usable for specific scene configurations, preventing their widespread use. Visibility culling algorithms can be separated into algorithms for visibility preprocessing and online occlusion culling. Visibility computations are also required to solve complex lighting interactions in the scene, ranging from soft and hard shadows to ambient occlusion and full-fledged global illumination. It is a big challenge to answer hundreds or thousands of visibility queries within a fraction of a second in order to reach real-time frame rates, which is one goal that we want to achieve in this thesis. The contributions of this thesis are four novel algorithms that provide solutions for efficient visibility interactions in order to achieve high-quality output-sensitive real-time rendering, and are general in the sense that they work with any kind of 3D scene configuration. First, we present two methods dealing with the issue of automatically partitioning view space and object space into useful entities that are optimal for the subsequent visibility computations. Amazingly, this problem area was mostly ignored despite its importance, and view cells are mostly tweaked by hand in practice in order to reach optimal performance – a very time-consuming task. The first algorithm specifically deals with the creation of an optimal view space partition into view cells using a cost heuristic and sparse visibility sampling. The second algorithm extends this approach to optimize both view space subdivision and object space subdivision simultaneously. Next, we present a hierarchical online culling algorithm that eliminates most limitations of previous approaches, and is rendering-engine friendly in the sense that it allows easy integration and efficient material sorting. It reduces the main problem of previous algorithms – the overhead due to many costly state changes and redundant hardware occlusion queries – to a minimum, obtaining up to a threefold speedup over previous work. 
Finally, we present an ambient occlusion algorithm which works in screen space, and show that high-quality shading with effectively hundreds of samples per pixel is possible in real time for both static and dynamic scenes by utilizing temporal coherence to reuse samples from previous frames.", month = apr, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "3D rendering, real-time rendering, ambient occlusion, visibility, occlusion culling", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/Mattausch-2010-vcr/", } @phdthesis{malik-thesis, title = "Feature Centric Volume Visualization", author = "Muhammad Muddassir Malik", year = "2009", abstract = "This thesis presents techniques and algorithms for the effective exploration of volumetric datasets. The visualization techniques are designed to focus on user-specified features of interest. The proposed techniques are grouped into four chapters, namely feature peeling, computation and visualization of fabrication artifacts, locally adaptive marching cubes, and comparative visualization for parameter studies of dataset series. The presented methods enable the user to efficiently explore the volumetric dataset for features of interest. Feature peeling is a novel rendering algorithm that analyzes ray profiles along lines of sight. The profiles are subdivided according to encountered peaks and valleys at so-called transition points. The sensitivity of these transition points is calibrated via two thresholds. The slope threshold is based on the magnitude of a peak following a valley, while the peeling threshold measures the depth of the transition point relative to the neighboring rays. This technique separates the dataset into a number of feature layers. Fabrication artifacts are of prime importance for quality control engineers for first-part inspection of industrial components. Techniques are presented in this thesis to measure fabrication artifacts through direct comparison of a reference CAD model with the corresponding industrial 3D X-ray computed tomography volume. Information from the CAD model is used to locate corresponding points in the volume data. Then various comparison metrics are computed to measure differences (fabrication artifacts) between the CAD model and the volumetric dataset. The comparison metrics are classified as either geometry-driven comparison techniques or visually driven comparison techniques. The locally adaptive marching cubes algorithm is a modification of the marching cubes algorithm where, instead of a global iso-value, each grid point has its own iso-value. This defines an iso-value field, which modifies the case identification process in the algorithm. An iso-value field enables the algorithm to correct biases within the dataset like low-frequency noise, contrast drifts, local density variations, and other artifacts introduced by the measurement process. It can also be used for blending between different iso-surfaces (e.g., skin and bone in a medical dataset). Comparative visualization techniques are proposed to carry out parameter studies for the special application area of dimensional measurement using industrial 3D X-ray computed tomography. A dataset series is generated by scanning a specimen multiple times while varying parameters of the scanning device. A high-resolution series is explored using a planar-reformatting-based visualization system. 
A multi-image view and an edge explorer are proposed for comparing and visualizing gray values and edges of several datasets simultaneously. For fast data retrieval and convenient usability, the datasets are bricked and efficient data structures are used.", month = dec, pages = "105", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "marching cubes, feature peeling, difference measurement, multiple datasets, parameter visualization, comparative visualization, industrial computed tomography, volume visualization, fabrication artifacts, magnetic resonance imaging", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/malik-thesis/", } @phdthesis{heinzl-2008-thesis, title = "Analysis and Visualization of Industrial CT Data", author = "Christoph Heinzl", year = "2009", abstract = "Industrial X-Ray 3D computed tomography (3DCT) is on the edge of advancing from a non-destructive testing method to a fully standardized means of dimensional measurement for everyday industrial use. Currently, 3DCT has drawn attention especially in the area of first-part inspections of new components, mainly in order to overcome limitations and drawbacks of common methods. Yet an increasing number of companies are benefiting from industrial 3DCT, and sporadically the first pioneers start using industrial 3DCT for quality control in the production phase of a component. As 3DCT is still a very young technology for industrial quality control, this method also faces severe problems, which seriously affect measurement results. Some of the major drawbacks for quality control are the following: Artefacts modify the spatial grey values, generating artificial structures in the datasets, which do not correspond to reality. Discrete sampling introduces further irregularities due to the Nyquist-Shannon sampling theorem. Uncertainty information is missing when extracting dimensional measurement features. Specifications and limitations of the components and the special setup of a 3DCT constrain the best achievable measurement precision. This thesis contributes to the state of the art by algorithmic evaluation of typical industrial tasks in the area of dimensional measurement using 3DCT. The main focus lies in the development and implementation of novel pipelines for everyday industrial use, including comparisons to common methods. Convenient and easy-to-understand means of visualization are evaluated and used to provide insight into the generated results. In particular, three pipelines are introduced, which cover some of the major aspects concerning metrology using industrial 3DCT. The considered aspects are robust surface extraction, artefact reduction via dual-energy CT, local surface extraction of multi-material components, and statistical analysis of multi-material components. 
The generated results of each pipeline are demonstrated and verified using test specimens as well as real-world components.", month = dec, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/heinzl-2008-thesis/", } @phdthesis{vucini-2009-phd, title = "On Visualization and Reconstruction from Non-uniform Point Sets", author = "Erald Vucini", year = "2009", abstract = "Technological and research advances in both acquisition and simulation devices provide continuously increasing high-resolution volumetric data that by far exceed today's graphical and display capabilities. Non-uniform representations offer a way of balancing this deluge of data by adaptively measuring (sampling) according to the importance (variance) of the data. Also, in many real-life situations the data are known only on a non-uniform representation. Processing of non-uniform data is a non-trivial task and hence more difficult than the processing of regular data. Transforming from non-uniform to uniform representations is a well-accepted paradigm in the signal processing community. In this thesis, we advocate such a concept. The main motivation for adopting this paradigm is that most of the techniques and methods related to signal processing, data mining, and data exploration are well-defined and stable for Cartesian data, but generally are non-trivial to apply to non-uniform data. Among other things, this will allow us to better exploit the capabilities of modern GPUs. In non-uniform representations, sampling rates can vary drastically, even by several orders of magnitude, making the decision on a target resolution a non-trivial trade-off between accuracy and efficiency. In several cases the points are spread non-uniformly with similar density across the volume, while in other cases the points have an enormous variance in distribution. In this thesis, we present solutions to both cases. For the first case, we suggest computing reconstructions of the same volume in different resolutions based on the level of detail we are interested in. The second case is the main motivation for proposing a multi-resolution scheme, where the scale of reconstruction is decided adaptively based on the number of points in each subregion of the whole volume. We introduce a novel framework for 3D reconstruction and visualization from non-uniform scalar and vector data. We adopt a variational reconstruction approach. In this method, non-uniform point sets are transformed to a uniform representation consisting of B-spline coefficients that are attached to the grid. With these coefficients we can define a C^2-continuous function across the whole volume. Several tests were performed in order to analyze and fine-tune our framework. 
All the tests and results of this thesis offer a new and different perspective on the visualization and reconstruction from non-uniform point sets.", month = nov, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/vucini-2009-phd/", } @phdthesis{beyer-2009-gpu, title = "GPU-based Multi-Volume Rendering of Complex Data in Neuroscience and Neurosurgery", author = "Johanna Beyer", year = "2009", abstract = "Recent advances in image acquisition technology and its availability in the medical and bio-medical fields have led to an unprecedented amount of high-resolution imaging data. However, the inherent complexity of this data, caused by its tremendous size, complex structure, or multi-modality, poses several challenges for current visualization tools. Recent developments in graphics hardware architecture have increased the versatility and processing power of today’s GPUs to the point where GPUs can be considered parallel scientific computing devices. The work in this thesis builds on the current progress in image acquisition techniques and graphics hardware architecture to develop novel 3D visualization methods for the fields of neurosurgery and neuroscience. The first part of this thesis presents an application and framework for planning of neurosurgical interventions. Concurrent GPU-based multi-volume rendering is used to visualize multiple radiological imaging modalities, delineating the patient’s anatomy, neurological function, and metabolic processes. Additionally, novel interaction metaphors are introduced, allowing the surgeon to plan and simulate the surgical approach to the brain based on the individual patient’s anatomy. The second part of this thesis focuses on GPU-based volume rendering techniques for large and complex EM data, as required in the field of neuroscience. A new mixed-resolution volume ray-casting approach is presented, which circumvents artifacts at block boundaries of different resolutions. NeuroTrace is introduced, an application for interactive segmentation and visualization of neural processes in EM data. EM data is extremely dense, heavily textured, and exhibits a complex structure of interconnected nerve cells, making it difficult to achieve high-quality volume renderings. Therefore, this thesis presents a novel on-demand nonlinear noise removal and edge detection method which makes it possible to enhance important structures (e.g., myelinated axons) while de-emphasizing less important regions of the data. In addition to the methods and concepts described above, this thesis tries to bridge the gap between state-of-the-art visualization research and the use of those visualization methods in actual medical and bio-medical applications.", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/beyer-2009-gpu/", } @phdthesis{patel-2009-evr, title = "Expressive Visualization and Rapid Interpretation of Seismic Volumes", author = "Daniel Patel", year = "2009", abstract = "One of the most important resources in the world today is energy. Oil and gas provide two-thirds of the world's energy consumption, making the world completely dependent on them. 
Locating and recovering the remaining oil and gas reserves will remain a major focus of society until competitive energy sources are found. The search for hydrocarbons is, broadly speaking, the topic of this thesis. Seismic measurements of the subsurface are collected to discover oil and gas trapped in the ground. Identifying oil and gas in the seismic measurements requires visualization and interpretation. Visualization is needed to present the data for further analysis. Interpretation is performed to identify important structures. Visualization is again required for presenting these structures to the user. This thesis investigates how computer assistance in producing high-quality visualizations and in interpretation can result in expressive visualization and rapid interpretation of seismic volumes. Expressive visualizations represent the seismic data in an easily digestible, intuitive and pedagogic form. This enables rapid interpretation, which accelerates the finding of important structures.", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/patel-2009-evr/", } @phdthesis{zambal-2009-ami, title = "Anatomical Modeling for Image Analysis in Cardiology", author = "Sebastian Zambal", year = "2009", month = apr, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/zambal-2009-ami/", } @phdthesis{Habel_2009_PhD, title = "Real-time Rendering and Animation of Vegetation", author = "Ralf Habel", year = "2009", abstract = "Vegetation rendering and animation in real-time applications still pose a significant problem due to the inherent complexity of plants. Both the high geometric complexity and intricate light transport require specialized techniques to achieve high-quality rendering of vegetation in real time. This thesis presents new algorithms that address various areas of both vegetation rendering and animation. For grass rendering, an efficient algorithm to display dense and short grass is introduced. In contrast to previous methods, the new approach is based on ray tracing to avoid the massive overdraw of billboard or explicit geometry representation techniques, achieving independence of the complexity of the grass without losing visual characteristics such as parallax and occlusion effects as the viewpoint moves. Also, a method to efficiently render leaves is introduced. Leaves exhibit a complex light transport behavior due to subsurface scattering, and special attention is given to the translucency of leaves, an integral part of leaf shading. The light transport through a leaf is precomputed and can be easily evaluated at runtime, making it possible to shade a massive number of leaves while including the effects that occur due to the leaf structure, such as varying albedo, thickness variations, or self-shadowing. To animate a tree, a novel deformation method based on a structural mechanics model that incorporates the important physical properties of branches is introduced. This model does not require the branches to be segmented by joints as other methods do, achieving smooth and accurate bending, and can be executed fully on a GPU.
To drive this deformation, an optimized spectral approach that also incorporates the physical properties of branches is used. This allows animating a highly detailed tree with thousands of branches and tens of thousands of leaves efficiently. Additionally, a method to use dynamic skylight models in spherical harmonics precomputed radiance transfer techniques is introduced, allowing the skylight parameters to be changed in real time at negligible cost and memory footprint.", month = feb, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Animation, Real-time Rendering, Vegetation", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/Habel_2009_PhD/", } @phdthesis{weidlich-2009-thesis, title = "Pseudochromatic Colourisation of Crystals in Predictive Image Synthesis", author = "Andrea Weidlich", year = "2009", month = jan, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Predictive rendering, Crystal rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/weidlich-2009-thesis/", } @phdthesis{rautek-2009-vmv, title = "Semantic Visualization Mapping for Volume Illustration", author = "Peter Rautek", year = "2009", abstract = "Scientific visualization is the discipline of automatically rendering images from scientific data. Adequate visual abstractions are important to show relevant information in the data. Visual abstractions are a trade-off between showing detailed information and preventing visual overload. To use visual abstractions for the depiction of data, a mapping from data attributes to visual abstractions is needed. This mapping is called the visualization mapping. This thesis reviews the history of visual abstractions and visualization mapping in the context of scientific visualization. Later, a novel visual abstraction method called caricaturistic visualization is presented. The concept of exaggeration is the visual abstraction used for caricaturistic visualization. Principles from traditional caricatures are used to accentuate salient details of data while sparsely sketching the context. The visual abstractions described in this thesis are inspired by visual art and mostly by traditional illustration techniques. To make effective use of recently developed visualization methods that imitate illustration techniques, an expressive visualization mapping approach is required. In this thesis a visualization mapping method is investigated that makes explicit use of semantics to describe mappings from data attributes to visual abstractions. The semantic visualization mapping explicitly uses domain semantics and visual abstraction semantics to specify visualization rules. Illustrative visualization results are shown that are achieved with the semantic visualization mapping. The behavior of the automatically rendered interactive illustrations is specified using interaction-dependent visualization rules. Interactions like the change of the viewpoint or the manipulation of a slicing plane are state of the art in volume visualization. In this thesis a method for more elaborate interaction techniques is presented.
The behavior of the illustrations is specified with interaction-dependent rules that are integrated into the semantic visualization mapping approach.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/rautek-2009-vmv/", } @phdthesis{scherzer-thesis, title = "Applications of temporal coherence in real-time rendering", author = "Daniel Scherzer", year = "2009", abstract = "Real-time rendering imposes the challenging task of creating a new rendering of an input scene at least 60 times a second. Although computer graphics hardware has made staggering advances in terms of speed and freedom of programmability, there still exist a number of algorithms that are too expensive to be computed within this time budget, such as exact shadows or an exact global illumination solution. One way to circumvent this hard time limit is to capitalize on temporal coherence to formulate algorithms that are incremental in time. The main thesis of this work is that temporal coherence is a characteristic of real-time graphics that can be used to redesign well-known rendering methods to become faster, while exhibiting better visual fidelity. To this end we present our adaptations of algorithms from the fields of exact hard shadows, physically correct soft shadows and fast discrete LOD blending, in which we have successfully incorporated temporal coherence. Additionally, we provide a detailed context of previous work not only in the field of temporal coherence, but also in the respective fields of the presented algorithms.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "shadows, lod, real-time, image-space", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/scherzer-thesis/", } @phdthesis{termeer-2009-cvc, title = "Comprehensive Visualization of Cardiac MRI Data", author = "Maurice Termeer", year = "2009", abstract = "Coronary artery disease is one of the leading causes of death in the western world. The continuous improvements in magnetic resonance imaging technology facilitate more accurate diagnoses by providing increasingly more detailed information on the viability, functioning, perfusion, and anatomy of a patient’s heart. This increasing amount of information creates the need for more efficient and more effective means of processing these data. This thesis presents several novel techniques that facilitate a more comprehensive visualization of a patient’s heart to assist in the diagnosis of coronary artery disease using magnetic resonance imaging (MRI). The volumetric bull’s eye plot is introduced as an extension of an existing visualization technique used in clinical practice---the bull’s eye plot. This novel concept offers a more comprehensive view of the viability of a patient’s heart by providing detailed information on the transmurality of scar while not suffering from discontinuities. Anatomical context is often lost due to abstract representations of data, or may be scarce due to the nature of the scanning protocol. Several techniques to restore the relation to anatomy are presented. The primary coronary arteries are segmented in a whole heart scan and mapped onto a volumetric bull’s eye plot, adding anatomical context to an abstract representation.
Similarly, segmented late enhancement data are rendered along with a three-dimensional segmentation of the patient-specific myocardial and coronary anatomy. Additionally, coronary supply territories are computed from patient-specific data as an improvement over models based on population averages. Information on the perfusion of the myocardium provided by MRI is typically of fairly low resolution. Using high-resolution anatomical data, an approach to visualize simulated myocardial perfusion is presented, taking full advantage of the detailed information on perfusion. Finally, a truly comprehensive visualization of a cardiac MRI exam is explored by combining whole heart, late enhancement, functional, and perfusion scans in a single visualization. The concepts introduced help to build a more comprehensive view of the patient, and the additional information may prove to be beneficial for the diagnostic process.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Cardiac MRI Visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/termeer-2009-cvc/", } @phdthesis{kohlmann-2009-lssl, title = "LiveSync: Smart Linking of 2D and 3D Views in Medical Applications", author = "Peter Kohlmann", year = "2009", abstract = "In this thesis two techniques for the smart linking of 2D and 3D views in medical applications are presented. Although real-time interactive 3D volume visualization is available even for very large data sets, it is used quite rarely in clinical practice. A major obstacle to better integration into the clinical workflow is the time-consuming process of adjusting the parameters to generate diagnostically relevant images. The clinician has to take care of the appropriate viewpoint, zooming, transfer function setup, clipping planes, and other parameters. Because of this, current applications primarily employ 2D views generated through standard techniques such as multi-planar reformatting (MPR). The LiveSync interaction metaphor is a new concept to synchronize 2D slice views and 3D volumetric views of medical data sets. Through intuitive picking actions on the slice, the users define the anatomical structures they are interested in. The 3D volumetric view is updated automatically so that the users are provided with diagnostically relevant images. To achieve this live synchronization, a minimal set of derived information is used, without the need for segmented data sets or data-specific precomputations. The presented system provides the physician with synchronized views which help them gain deeper insight into the medical data with minimal user interaction. Contextual picking is a novel method for the interactive identification of contextual interest points within volumetric data by picking on a direct volume rendered image. In clinical diagnostics the points of interest are often located in the center of anatomical structures. In order to derive the volumetric position, which allows a convenient examination of the intended structure, the system automatically extracts contextual meta information from the DICOM (Digital Imaging and Communications in Medicine) images and the setup of the medical workstation. Along a viewing ray for a volumetric picking, the ray profile is analyzed to detect structures which are similar to predefined templates from a knowledge base.
It is demonstrated that the obtained position in 3D can be utilized to highlight a structure in 2D slice views, to interactively calculate approximate centerlines of tubular objects, or to place labels at contextually-defined 3D positions.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/kohlmann-2009-lssl/", } @phdthesis{charalambos-thesis_hlod, title = "HLOD Refinement Driven by Hardware Occlusion Queries", author = "Jean Pierre Charalambos", year = "2008", abstract = "In order to achieve interactive rendering of complex models comprising several million polygons, the amount of processed data has to be substantially reduced. Level-of-detail (LOD) methods allow the amount of data sent to the GPU to be aggressively reduced at the expense of sacrificing image quality. Hierarchical level-of-detail (HLOD) methods have proved particularly capable of interactive visualisation of huge data sets by precomputing levels-of-detail at different levels of a spatial hierarchy. HLODs support out-of-core algorithms in a straightforward way and allow an optimal balance between CPU and GPU load during rendering. Occlusion culling represents an orthogonal approach for reducing the amount of rendered primitives. Occlusion culling methods aim to quickly cull the invisible part of the model and render only its visible part. Most recent methods use hardware occlusion queries (HOQs) to achieve this task. The effects of HLODs and occlusion culling can be successfully combined. Firstly, nodes which are completely invisible can be culled. Secondly, HOQ results can be used for visible nodes when refining an HLOD model: according to the degree of visibility of a node and the perceptual phenomenon of visual masking, it can be determined that further refining the node would yield no gain in the final appearance of the image. In the latter case, HOQs allow more aggressive culling of the HLOD hierarchy, further reducing the amount of rendered primitives. However, due to the latency between issuing an HOQ and the availability of its result, the direct use of HOQs for refinement criteria causes CPU stalls and GPU starvation. This thesis introduces a novel error metric that takes visibility information (gathered from HOQs) as an integral part of refining an HLOD model; to the best of our knowledge, this is the first approach of its kind within this context. A novel traversal algorithm for HLOD refinement is also presented to take full advantage of the introduced HOQ-based error metric. The algorithm minimises CPU stalls and GPU starvation by predicting HLOD refinement conditions using spatio-temporal coherence of visibility. The combined approach presented here improves performance at the same visual quality, whilst our occlusion culling technique remains conservative. Our error metric supports both polygon-based and point-based HLODs, ensuring full use of the information gathered in HOQs. Our traversal algorithm makes full use of the spatial and temporal coherency inherent in hierarchical representations.
Our approach can be straightforwardly implemented.", month = dec, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/charalambos-thesis_hlod/", } @phdthesis{fuchs-vortex, title = "The Visible Vortex - Interactive Analysis and Extraction of Vortices in Large Time-Dependent Flow Data Sets", author = "Raphael Fuchs", year = "2008", abstract = "Computational simulation of physical and chemical processes has become an essential tool to tackle questions from the field of fluid dynamics. Using current simulation packages it is possible to compute unsteady flow simulations for realistic scenarios. The resulting solutions are stored in large to very large grids in 2D or 3D, frequently time-dependent, with multi-variate results from the numeric simulation. With increasing complexity of simulation results, powerful analysis and visualization tools are needed to make sense of the computed information and answer the question at hand. To do this, we need new approaches and algorithms to locate regions of interest, find important structures in the flow and analyze the behavior of the flow interactively. The main goals of this thesis are the extension of vortex detection criteria to unsteady flow and the combination of vortex detectors with interactive visual analysis. To develop an understanding of the simulation results it is necessary to compare attributes of the simulation to each other and to be able to relate them to larger structures such as vortices. It is shown how automatic feature detection algorithms can be combined with interactive analysis techniques such that both detection and analysis benefit. By extending and integrating vortex detectors into the process of visual analysis, it becomes possible to understand the impact of vortex structures on the development of the flow. Using real-world examples from the field of engine design, we discuss how vortex structures can have a critical impact on the performance of a prototype. We illustrate how interactive visual analysis can support prototype design and evaluation. Furthermore, we show that taking the unsteady nature of the flow into account improves the quality of the extracted structures.", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Interactive Visual Analysis, Vortex Detection, Scientific Visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/fuchs-vortex/", } @phdthesis{bruckner-2008-IIV, title = "Interactive Illustrative Volume Visualization", author = "Stefan Bruckner", year = "2008", abstract = "Illustrations are essential for the effective communication of complex subjects. Their production, however, is a difficult and expensive task. In recent years, three-dimensional imaging has become a vital tool not only in medical diagnosis and treatment planning, but also in many technical disciplines (e.g., material inspection), biology, and archeology. Modalities such as X-Ray Computed Tomography (CT) and Magnetic Resonance Imaging (MRI) produce high-resolution volumetric scans on a daily basis. It seems counter-intuitive that even though such a wealth of data is available, the production of an illustration should still require a mainly manual and time-consuming process.
This thesis is devoted to the computer-assisted generation of illustrations directly from volumetric data using advanced visualization techniques. The concept of a direct volume illustration system is introduced for this purpose. Instead of requiring an additional modeling step, this system allows the designer of an illustration to work directly on the measured data. Abstraction, a key component of traditional illustrations, is used in order to reduce visual clutter, emphasize important structures, and reveal hidden detail. Low-level abstraction techniques are concerned with the appearance of objects and allow flexible artistic shading of structures in volumetric data sets. High-level abstraction techniques control which objects are visible. For this purpose, novel methods for the generation of ghosted and exploded views are introduced. The visualization techniques presented in this thesis employ the features of current graphics hardware to achieve interactive performance. The resulting system allows the generation of expressive illustrations directly from volumetric data with applications in medical training, patient education, and scientific communication.", month = mar, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "volume rendering, illustrative visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/bruckner-2008-IIV/", } @phdthesis{Ulbricht-2007-thesis, title = "Usability of Digital Cameras for Verifying Physically Based Rendering Systems ", author = "Christiane Ulbricht", year = "2007", abstract = "The goal of this work was to model and render a simple scene such that the result is as nearly identical to a digital photograph as possible. It was clear from the outset that many obstacles would have to be overcome, and we expected the main gain in knowledge to come from understanding these many small steps. In the end, the obstacles proved even more extensive than assumed, and so the topic was narrowed to the usability of digital cameras for this problem, including the processing steps necessary to make the measured values comparable. In the course of this work, a new method was also developed to compensate for the deviation of human perception from the sensor response of a camera.", month = jan, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/Ulbricht-2007-thesis/", } @phdthesis{giegl-2007-pcg, title = "Practical Computer Graphics Research, Game Development and the European GameTools Project", author = "Markus Giegl", year = "2007", abstract = "Computer graphics today plays an important role in 3D computer and video game creation, since all the images which are presented to the player are based on principles and algorithms devised by computer graphics research. One important aspect with regard to the believability of game worlds, and consequently the immersion of players within them, is shadows. Creating high-quality dynamic shadows in real time is still a very open field of research. One real-time shadowing approach that is very appealing due to its apparent elegant simplicity is shadow mapping.
The term "apparent" is used deliberately here, since the elegance of shadow mapping comes at a price, namely the discretization of the first-hit visibility problem to a grid storing occluder depth information with regard to the light source. In practice this often leads to shadowing artifacts, due to too little information being available to answer the visibility query with enough accuracy. In this thesis, a new family of shadow mapping algorithms is presented, which address the problem of the shadow map not containing enough information. This is done without requiring a shadow map grid that would be too large to be stored in memory or that could not be filled with depth information in real time. Based on a basic brute-force approach, two smart algorithms are presented, which work in a manner adaptive to the resolution requirements of the scene, speeding up the process by at least an order of magnitude. Another problem with relevance to the practical application of computer graphics to games is how to render a large number of objects fast enough to give the player a high level of responsiveness (and thereby, again, immersion). Noticing that, due to perspective foreshortening, objects farther away from the player do not need as much detail as nearer objects, computer graphics has come up with the concept of level of detail (LOD), where farther away objects use simpler representations; if done correctly, this leads to a large increase in rendering speed. While LOD has been studied extensively in computer graphics, one problem of the LOD technique dominating practical application, discrete LOD, has been largely ignored: in discrete LOD the simpler representations of an object are, as the name implies, discrete, i.e. they exist independently of each other. Switching between representations of different complexity by simply switching the representation in use leads to a disruptive discontinuity in player perception. This thesis presents a practical algorithm that addresses this problem. Linked to this research in the computer graphics fields of shadows and level of detail is the case study of the GameTools Project, a Europe-wide project funded by the European Union, whose agenda was to bring the results of current computer graphics research done within the project to European game development companies and companies from neighboring fields. The author holds the position of Community Manager within the GameTools Project, and presents an overview of the project and a more detailed view of his work as Community Manager.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/giegl-2007-pcg/", } @phdthesis{zotti-2007-PhD, title = "Computer Graphics in Historical and Modern Sky Observations", author = "Georg Zotti", year = "2007", abstract = "This work describes research in three areas where sky observations meet computer graphics. The whole topic covers several millennia of human history and posed combined challenges from fields including archaeology, astronomy, cultural heritage, digital image processing and computer graphics. The first part presents interdisciplinary work done in the fields of archaeo-astronomy, visualisation and virtual reconstruction.
A novel diagram has been developed which provides an intuitive, accessible visualisation for investigating archaeological survey maps for evidence of astronomically motivated orientation of buildings. This visualisation was developed and first applied to a certain class of neolithic circular structures in Lower Austria in order to investigate the idea of solar orientation of access doorways. This diagram and its intuitive interpretation allowed the author to formulate a new hypothesis about practical astronomical activities in the middle neolithic period in central Europe. Thereafter, it is described how virtual reconstructions of these buildings, characteristic of a short period of the neolithic epoch, can be combined with the excellent sky simulation of a modern planetarium to communicate these results to a broader audience. The second part of this work describes a certain class of historical scientific instruments for sky observations and their reconstruction with methods of computer graphics. Long after the stone age, in the Middle Ages, the astrolabe was the most celebrated instrument for celestial observations and has been explained in contemporary literature, usually with the help of precomputed tables for a certain size or kind of instrument. Today, historical exhibitions frequently present one of these instruments, but its various applications are hard to explain to the general audience without hands-on demonstration. For this challenge from the cultural heritage domain, an approach using the idea of procedural modelling is presented. Here, a computer graphics model is not statically drawn but specified by parametrised plotting functions, which can then be repeatedly executed with different parameters to create the final model. This approach is demonstrated to provide a very flexible solution which can immediately be applied to specific needs just by tweaking a few parameters, instead of having to repetitively draw the whole model manually. From the two-dimensional procedural model, 3D models can be easily created, and even the production of wooden instruments on a laser engraver/plotter is demonstrated. The third and longest part deals with methods of sky simulation and rendering in the domain of computer graphics. In this discipline, modelling of skylight and atmospheric effects has developed tremendously over the last two decades, which is covered by an extensive survey of literature from the computer graphics and atmospheric physics domains. The requirements of physically correct or at least plausible rendering include realistic values for sky brightness. Measurements performed with a luminance meter on a clear sky in order to verify the currently most widely used analytic skylight model [Preetham 1999] show, however, its limited applicability. There are two classical groups of clear-sky models: numerical simulations of scattering in the atmosphere, and fast analytical models. Recently, another method for more realistic-looking skylight models has been developed: digital images taken with a fisheye lens are combined into high dynamic range images which can be used for scene illumination and as sky background. These images can be calibrated by photometric measurements of absolute luminance values. Long-time exposures allow this system to be applied to quantitative investigations of sky brightness, sky colours, and also nocturnal light pollution by artificial illumination.
Results and other applications of the system are presented, and the pipeline for creating such images is described in the appendix. This work closes with some notes on future directions of research.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Astrolabe, Archaeo-Astronomy, Skydome Visualisation, Cultural Heritage, Sky Measurements", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/zotti-2007-PhD/", } @phdthesis{mantler-2007-rtv, title = "Interactive Vegetation Rendering", author = "Stephan Mantler", year = "2007", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/mantler-2007-rtv/", } @phdthesis{straka-phd-thesis, title = "Processing and Visualization of Peripheral CT-Angiography Datasets", author = "Mat\'{u}s Straka", year = "2006", abstract = "In this thesis, individual steps of a pipeline for processing peripheral Computed Tomography Angiography (CTA) datasets are addressed. Peripheral CTA datasets are volumetric datasets representing pathologies in the vascularity of the lower extremities of the human body. These pathologies result from various atherosclerotic diseases, such as Peripheral Arterial Occlusive Disease (PAOD), and their early and precise diagnosis contributes significantly to the planning of later interventional radiology treatment. The diagnosis is based on visualization of the imaged vascular tree, where individual pathologic changes, such as plaque, calcifications, stenoses of the vessel lumen and occluded parts of the vessels, are visible. CTA has evolved in recent years into a robust, accurate and cost-effective imaging technique for patients with both coronary and arterial diseases. As a result of the CTA scanning, a set of 1200–2000 transverse slices is acquired, depicting vessels enhanced by means of an intravenously injected contrast medium. The number of slices is high, and therefore their manual examination is laborious and lengthy. As a remedy, post-processing methods were developed to allow faster and more intuitive visualization of the imaged vascularity. However, simple visualization by means of traditional techniques such as maximum-intensity projection (MIP) or direct volume rendering (DVR) is hampered by the presence of bones in the dataset, which occlude the vessels. Therefore, a sequence of operations—the processing pipeline—is needed, leading to the generation of clinically relevant images which depict unobstructed vessels. In the first step of the pipeline the dataset is segmented and the tissues are classified, to allow subsequent vessel identification and bone removal. This is a complex task because of the high density and spatial variability of the tissues. Traditional image processing techniques do not deliver acceptable results, and therefore in this thesis we present new approaches that introduce additional ’anatomic’ information into the segmentation and classification process. We propose a probabilistic atlas which enables modeling of spatial and density distributions of vessel and bone tissues in datasets, to allow their improved classification. In the atlas construction, non-rigid thin-plate spline warping and registration of the datasets are applied to address the high anatomic variability among patients.
The concept of the atlas is extended by means of the watershed transform to further improve the precision of the registration procedure. Alternatively, we propose and evaluate a technique for vessel enhancement based on Hessian filtering to allow detection and recognition of vessel structures without operator supervision. In the second step a geometric model of the vessel tree is constructed to derive information about the vessel centerlines. Here, an existing algorithm based on so-called vessel tracking, implemented by means of optimal path searching, is exploited, with improvements to make the geometric model more precise. The third step of the processing pipeline—visualization—requires this model, since its results can be significantly influenced by potential imperfections, which would yield clinically misleading images. To address the limitations of vessel visualization by means of existing techniques such as MIP, CPR or DVR, we propose their generalization in the form of a focus & context-based concept called VesselGlyph. VesselGlyph makes it possible to combine various visualization techniques intuitively and systematically in a single image, to allow a better, more comprehensive and unoccluded view of vessels for diagnostic purposes. To support the design and development of the proposed segmentation, modeling and visualization algorithms and to enable their application in the clinical environment, we implemented a set of tools grouped in the AngioVis ToolBox software. Within this application, individual steps of the processing pipeline are accomplished. The toolbox is complemented with additional utilities that together constitute fully functional medical workstation software, which is regularly used to process patient data in the clinical environment.", month = aug, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Visualization, Medical Data Processing, Segmentation, Vessel Modeling, 3D Reconstruction, Vessel Visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/straka-phd-thesis/", } @phdthesis{Cruz-thesis, title = "3D Modelling and Reconstruction of Peripheral Vascular Structure", author = "Alexandra La Cruz", year = "2006", abstract = "A model is a simplified representation of an object. The modeling stage could be described as shaping individual objects that are later used in the scene. For many years, scientists have been trying to create an appropriate model of the blood vessels. It seems intuitive that a blood vessel can be modeled as a tubular object, and this is true, but problems appear when one wants to create an accurate model that can deal with the wide variability of shapes of diseased blood vessels. From the medical point of view, it is quite important to identify not just the center of the vessel lumen but also the center of the vessel, particularly in the presence of anomalies, as is the case for diseased blood vessels. An accurate estimation of vessel parameters is a prerequisite for automated visualization and analysis of healthy and diseased blood vessels. We believe that a model-based technique is the most suitable one for parameterizing blood vessels. The main focus of this work is to present a new strategy to parameterize diseased blood vessels of the lower extremity arteries.
The first part presents an evaluation of different methods for approximating the centerline of the vessel in a phantom simulating the peripheral arteries. Six algorithms were used to determine the centerline of a synthetic peripheral arterial vessel. They are based on: ray casting using thresholds and a maximum-gradient-like stop criterion, pixel-motion estimation between successive images called block matching, center of gravity, and shape-based segmentation. The Randomized Hough Transform and ellipse fitting have been used as shape-based segmentation techniques. Since in the synthetic data set the centerline is known, an estimation of the error can be calculated in order to determine the accuracy achieved by a given method. The second part describes an estimation of the dimensions of lower extremity arteries, imaged by computed tomography. The vessel is modeled using an elliptical or cylindrical structure with specific dimensions, orientation and CT attenuation values. The model separates two homogeneous regions: its inner side represents a region of density for vessels, and its outer side a region for background. The point spread function of the CT scanner, modeled using a Gaussian kernel, is taken into account in order to smooth the vessel boundary in the model. An optimization process is used to find the best model that fits the input data. The method provides center location, diameter and orientation of the vessel as well as blood and background mean density values. The third part presents the result of a clinical evaluation of our methods, as a prerequisite for their use in the clinical environment. To perform this evaluation, twenty cases from available patient data were selected and classified as 'mildly diseased' and 'severely diseased' datasets. Manual identification was used as our reference standard. We compared the model fitting method against a standard method, which is currently used in the clinical environment. In general, the mean distance error for every method was within the inter-operator variability. However, the non-linear model fitting technique based on a cylindrical model showed a better center approximation in most of the cases, 'mildly diseased' as well as 'severely diseased'. Clinically, the non-linear model fitting technique is more robust and presented a better estimation in most of the cases. Nevertheless, the radiologists and clinical experts have the last word with respect to the use of this technique in the clinical environment.", month = mar, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Vessel Visualization, 3D Modeling, Segmentation, 3D Reconstruction", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/Cruz-thesis/", } @phdthesis{Mlejnek_2006_MVOA, title = "Medical Visualization for Orthopedic Applications", author = "Matej Mlejnek", year = "2006", abstract = "This dissertation discusses visualization techniques for articular cartilage for both quantitative and qualitative assessment. Articular cartilage is a very thin structure covering the ends of human bones. Thus, even slight changes in its thickness and inner structure may indicate the occurrence or progression of degeneration. The early detection of these factors is crucial for the diagnosis and treatment of cartilage degeneration. Research to find treatments to stop or even reverse these degenerative changes is well in progress.
Magnetic resonance imaging is currently the best and most widely used non-invasive technique for the acquisition of soft tissue structures like articular cartilage. In this work we use two types of data: a high-resolution anatomical scan of the cartilage and a T2 map, which is computed from a set of sequences with varying parameters. While the thickness of the cartilage can be precisely assessed from the anatomical scan, the T2 map offers information on the inner structures of the cartilage. Since the femoral cartilage is a curved thin-walled structure with a relatively small extent in one direction, it is very difficult to estimate its thickness from a stack of slices or even from a three-dimensional reconstruction of its surface. We discuss inspection of the tissue by unfolding and, thus, representing the tissue as a two-dimensional height field. Such a transformation of the object enables the application of 2D geometrical operations in order to observe subtle details in the thickness of the tissue. Nowadays scanners allow a quality assessment by checking for disruptions in the pattern of the T2 map of the patellar cartilage. The T2 map illustrates the quality of the cartilage, and changes in the pattern of the T2 map indicate defects before changes in the thickness itself occur. We propose Profile Flags - an intuitive interface for probing T2 maps by browsing the reconstructed surface of the cartilage. The Profile Flag is positioned on the reconstructed surface of the tissue, and can be moved along it. Profile Flags can be applied to annotate local as well as global characteristics of the underlying data in a single still image. Furthermore, we present a set of extensions to Profile Flags for selection, classification and automatic positioning. Profile Flags can also be used to measure time-varying dynamic contrast enhanced magnetic resonance imaging data.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "applications of visualization, visualization in medicine", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/Mlejnek_2006_MVOA/", } @phdthesis{phd-viola, title = "Importance-Driven Expressive Visualization", author = "Ivan Viola", year = "2005", abstract = "In this thesis several expressive visualization techniques for volumetric data are presented. The key idea is to classify the underlying data according to its prominence in the resulting visualization by an importance value. The importance property drives the visualization pipeline to emphasize the most prominent features and to suppress the less relevant ones. The suppression can be realized globally, so that the whole object is suppressed, or locally. A local modulation generates cut-away and ghosted views, because the suppression of less relevant features occurs only on the part where they would occlude more important features. Features within the volumetric data are classified according to a new dimension denoted as object importance. This property determines which structures should be readily discernible and which structures are less important. Next, for each feature various representations (levels of sparseness) from a dense to a sparse depiction are defined. Levels of sparseness define a spectrum of optical properties or rendering styles. The resulting image is generated by ray-casting and combining the intersected features proportionally to their importance.
In a step additional to traditional volume rendering, the areas of occlusion are evaluated and a particular level of sparseness is assigned. This step is denoted as importance compositing. Advanced schemes for importance compositing determine the resulting visibility of features, and if the resulting visibility distribution does not correspond to the importance distribution, different levels of sparseness are selected. The applicability of importance-driven visualization is demonstrated on several examples from medical diagnostics scenarios, flow visualization, and interactive illustrative visualization.", month = jun, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/phd-viola/", } @phdthesis{neubauer-2005-vir, title = "Virtual Endoscopy for Preoperative Planning and Training of Endonasal Transsphenoidal Pituitary Surgery", author = "Andre Neubauer", year = "2005", abstract = "Virtual endoscopy is the navigation of a virtual camera through anatomy, computationally reconstructed from radiological data. Virtual endoscopy mimics physical minimally invasive surgical activity and is used for diagnosis (e.g., the detection of colon polyps), planning of endoscopic interventions, postoperative assessment of surgical success and training for inexperienced endoscopists. This thesis introduces STEPS, a virtual endoscopy system designed as a planning and training tool for endonasal transsphenoidal pituitary surgery, a method used to minimally invasively remove tumors of the pituitary gland. A rigid endoscope is inserted into the nose and advanced through intracranial cavities towards the bony wall covering the pituitary gland. This bone is then opened and the tumor is removed. STEPS reconstructs the boundaries of the investigated cavities primarily using iso-surfacing in original CT data. Presegmented objects of interest can be added to the scene and displayed behind the semi-transparent isosurface. These objects (e.g., the tumor, the pituitary gland and important blood vessels) provide an augmented picture of patient anatomy to guide the surgeon, aid in planning the endoscopic approach, and help the user find an ideal site for actual surgical activity. Visual information can be further enhanced by display of rigid structures beyond the isosurface. The user can freely decide upon colors and lighting conditions. All rendering techniques applied by STEPS are completely CPU-based, ensuring a high degree of flexibility and hardware independence. Nevertheless, rendering is efficient, resulting in smooth interaction. STEPS allows free navigation through the nasal and paranasal anatomy, but can also be used to simulate the movement parameters of rigid endoscopes. This includes simulation of surgical instruments and haptic feedback. Furthermore, STEPS allows the simulation of angled endoscopes and the simulation of barrel distortion as exhibited by real endoscope optics.
This thesis gives an overview of existing techniques and applications of virtual endoscopy, introduces the field of application, and, in detail, describes STEPS, the required preprocessing, the rendering techniques and the user interface.", month = jun, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/neubauer-2005-vir/", } @phdthesis{Grimm-thesis, title = "Real-Time Mono- and Multi-Volume Rendering of Large Medical Datasets on Standard PC Hardware", author = "S\"{o}ren Grimm", year = "2005", abstract = "Direct Volume Visualization is an efficient technique to explore complex structures within volumetric data. Its main advantage, compared to standard 3D surface rendering, is the ability to perform semitransparent rendering in order to provide more information about spatial relationships of different structures. Semitransparent rendering requires processing a huge amount of data. The size of volumetric data is rapidly increasing, on the one hand due to the boost in processing power in the past years, and on the other hand due to improved capabilities of newer acquisition devices. Such large data presents a challenge to current rendering architectures and techniques. The enormous data sizes introduce a growing demand for interactive 3D visualization. Conventional slicing methods already reach their limit of usability due to the enormous number of slices. 3D visualization is increasingly explored as an attractive complementary method for the examination of large medical data, supporting the necessary 2D examination. Within this dissertation a set of approaches to handle and render large volumetric data is developed, enabling significant performance improvements due to a much better utilization of the CPU's processing power and available memory bandwidth. First, highly efficient approaches for addressing and processing a cache-efficient memory layout for volumetric data are presented. These approaches serve as a base for a full-blown high-quality raycasting system, capable of handling large data up to 3GB, a limitation imposed by the virtual address space of current consumer operating systems. The core acceleration techniques of this system are a refined caching scheme for gradient estimation in conjunction with a hybrid skipping and removal of transparent regions to reduce the amount of data to be processed. This system is extended so that efficient processing of multiple large data sets is possible. An acceleration technique for direct volume rendering of scenes composed of multiple volumetric objects is developed; it is based on the distinction between regions of intersection, which need costly multi-volume processing, and regions containing only one volumetric object, which can be efficiently processed. Furthermore, V-Objects, a concept of modeling scenes consisting of multiple volumetric objects, are presented. It is demonstrated that the concept of V-Objects, in combination with direct volume rendering, is a promising technique for visualizing medical data and can provide advanced means to explore and investigate data. In the second part of the dissertation, an alternative to grid-based volume graphics is presented: Vots, a point-based representation of volumetric data. It is a novel primitive for volumetric data modeling, processing, and rendering.
A new paradigm is presented, moving the data from a discrete representation to an implicit one.", month = apr, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/Grimm-thesis/", } @phdthesis{jeschke-05-ARI, title = "Accelerating the Rendering Process Using Impostors", author = "Stefan Jeschke", year = "2005", abstract = "The interactive rendering of three-dimensional geometric models is a research area of great interest in computer graphics. The generation of fluid animation for complex models consisting of many millions of primitives, at more than 60 frames per second, is a special challenge. Possible applications include ship, driving and flight simulators, virtual reality and computer games. Although the performance of common computer graphics hardware has dramatically increased in recent years, the demand for more realism and complexity in common scenes is growing even faster. This dissertation is about one approach for accelerating the rendering of such complex scenes. We take advantage of the fact that the appearance of distant scene parts hardly changes for several successive output images. Those scene parts are replaced by precomputed image-based representations, so-called impostors. Impostors are very fast to render while maintaining the appearance of the scene part as long as the viewer moves within a bounded viewing region, a so-called view cell. However, unsolved problems of impostors are the support of satisfying visual quality with reasonable computational effort for the impostor generation, as well as the very high memory requirements of impostors for common scenes. To date, these problems have been the main reason why impostors are hardly used for rendering acceleration. This thesis presents two new impostor techniques that are based on partitioning the scene part to be represented into image layers with different distances to the observer. A new error metric allows a minimum visual quality of an impostor to be guaranteed even for large view cells. Furthermore, invisible scene parts are efficiently excluded from the representation without requiring any knowledge about the scene structure, which provides a more compact representation. One of the techniques combines every image layer separately with geometric information. This allows a fast generation of memory-efficient impostors for distant scene parts. In the other technique, the geometry is independent of the depth layers, which allows a compact representation for near scene parts. The second part of this work is about the efficient usage of impostors for a given scene. The goal is to guarantee a minimum frame rate for every view within the scene while at the same time minimizing the memory requirements for all impostors. The presented algorithm automatically selects impostors and view cells so that for every view, only the most suitable scene parts are represented as impostors. Previous approaches generated numerous similar impostors for neighboring view cells, thus wasting memory. The new algorithm overcomes this problem. The simultaneous use of additional acceleration techniques further reduces the required impostor memory and allows making best use of all available techniques at the same time.
The approach is general in the sense that it can handle arbitrary scenes and a broad range of impostor techniques, and the acceleration provided by the impostors can be adapted to the bottlenecks of different rendering systems. In summary, the provided techniques and algorithms dramatically reduce the required impostor memory and simultaneously guarantee a minimum output image quality. This makes impostors useful for numerous scenes and applications where they could hardly be used before.", month = mar, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "image-based rendering, impostors, rendering acceleration", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/jeschke-05-ARI/", } @phdthesis{doleisch-thesis, title = "Visual Analysis of Complex Simulation Data using Multiple Heterogeneous Views", author = "Helmut Doleisch", year = "2004", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2004/doleisch-thesis/", } @phdthesis{Artusi-thesis, title = "Real-Time Tone Mapping", author = "Alessandro Artusi", year = "2004", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2004/Artusi-thesis/", } @phdthesis{Hadwiger-thesis, title = "High-Quality Visualization and Filtering of Textures and Segmented Volume Data on Consumer Graphics Hardware", author = "Markus Hadwiger", year = "2004", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2004/Hadwiger-thesis/", } @phdthesis{Kanitsar-thesis, title = "Curved Planar Reformation for Vessel Visualization", author = "Armin Kanitsar", year = "2004", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2004/Kanitsar-thesis/", } @phdthesis{Laramee-2004-thesis, title = "Interactive 3D Flow Visualization Based on Textures and Geometric Primitives", author = "Robert S. Laramee", year = "2004", abstract = "This thesis presents research in the area of flow visualization. The theoretical framework is based on the notion that flow visualization methodology can be classified into four main areas: direct, geometric, texture-based, and feature-based flow visualization. Our work focuses on the direct, geometric, and texture-based categories, with special emphasis on texture-based approaches. After presenting the state of the art, we discuss a technique for resampling of CFD simulation data. The resampling tool addresses both the perceptual problems resulting from a brute-force hedgehog visualization and flow field coverage problems. These challenges are handled by giving the user control of the resolution of the resampling grid in object space and precise control of where to place the vector glyphs. Afterward, we present a novel technique for visualization of unsteady flow on surfaces from computational fluid dynamics.
The method generates dense representations of time-dependent vector fields with high spatio-temporal correlation. While the 3D vector fields are associated with arbitrary triangular surface meshes, the generation and advection of texture properties are confined to image space. Frame rates of up to 60 frames per second are realized by exploiting graphics card hardware. We apply this algorithm to unsteady flow on boundary surfaces of large, complex meshes from computational fluid dynamics composed of more than 200,000 polygons, to dynamic meshes with time-dependent geometry and topology, as well as to medical data. We also apply texture-based flow visualization techniques to isosurfaces. The result is a combination of two well-known scientific visualization techniques, namely isosurfacing and texture-based flow visualization, into a useful hybrid approach. Next, we describe our collection of geometric flow visualization techniques, including oriented streamlines, streamlets, a streamrunner tool, streamcomets, and a real-time animated streamline technique. We place special emphasis on the measures required for geometric techniques to be applicable to real-world data sets. To demonstrate the use of all techniques, we apply our direct, geometric, and texture-based flow visualization techniques to investigate swirl and tumble motion, two flow patterns commonly found in computational fluid dynamics (CFD).", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2004/Laramee-2004-thesis/", } @phdthesis{Hey-thesis, title = "Photorealistic and Hardware Accelerated Rendering of Complex Scenes", author = "Heinrich Hey", year = "2002", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2002/Hey-thesis/", } @phdthesis{Maierhofer-thesis, title = "Rule-Based Mesh Growing and Generalized Subdivision Meshes", author = "Stefan Maierhofer", year = "2002", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2002/Maierhofer-thesis/", } @phdthesis{Dorfmueller-Ulhaas-thesis, title = "Optical Tracking - From User Motion To 3D Interaction", author = "Klaus Dorfm\"{u}ller-Ulhaas", year = "2002", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2002/Dorfmueller-Ulhaas-thesis/", } @phdthesis{Wimmer-thesis, title = "Representing and Rendering Distant Objects for Real-Time Visualization", author = "Michael Wimmer", year = "2001", month = jun, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2001/Wimmer-thesis/", } @phdthesis{Mroz-thesis, title = "Real-Time Volume Visualization on Low-End Hardware", author = "Lukas Mroz", year = "2001", address = "Favoritenstrasse 9-11/E193-02,
A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2001/Mroz-thesis/", } @phdthesis{Wonka-thesis, title = "Occlusion Culling for Real-Time Rendering of Urban Environments", author = "Peter Wonka", year = "2001", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2001/Wonka-thesis/", } @phdthesis{Hesina-thesis, title = "Distributed Collaborative Augmented Reality", author = "Gerd Hesina", year = "2001", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2001/Hesina-thesis/", } @phdthesis{Koenig-thesis, title = "Usability Issues in Medical Volume Visualization", author = "Andreas K\"{o}nig", year = "2001", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2001/Koenig-thesis/", } @phdthesis{Kosara-thesis, title = "Semantic Depth of Field - Using Blur for Focus + Context Visualization", author = "Robert Kosara", year = "2001", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2001/Kosara-thesis/", } @phdthesis{Wilkie-thesis, title = "Photon Tracing for Complex Environments", author = "Alexander Wilkie", year = "2001", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2001/Wilkie-thesis/", } @phdthesis{Buehler-thesis, title = "A new Subdivision Algorithm for the Intersection of Parametric Surfaces", author = "Katja B\"{u}hler", year = "2001", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2001/Buehler-thesis/", } @phdthesis{Neumann-thesis, title = "Constructions of Bidirectional Reflection Distribution Functions", author = "Attila Neumann", year = "2001", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2001/Neumann-thesis/", } @phdthesis{Prikryl-thesis, title = "Perceptually Driven Radiosity", author = "Jan Prikryl", year = "2001", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2001/Prikryl-thesis/", } @phdthesis{Bartroli-thesis, title = "Visualization Techniques for Virtual Endoscopy", author = "Anna Vilanova i Bartroli", year = "2001", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = 
"https://www.cg.tuwien.ac.at/research/publications/2001/Bartroli-thesis/", } @phdthesis{Hladuvka-thesis, title = "Derivatives and Eigensystems for Volume-Data Analysis and Visualization", author = "Ji\v{r}\'{i} Hlad\r{u}vka", year = "2001", pages = "116", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Volumendaten, Datenanalyse, Visualisierung, Klassifikation", URL = "https://www.cg.tuwien.ac.at/research/publications/2001/Hladuvka-thesis/", } @phdthesis{Csebfalvi-thesis, title = "Interactive Volume-Rendering Techniques for Medical Data Visualization", author = "Bal\'{a}zs Cs\'{e}bfalvi", year = "2001", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2001/Csebfalvi-thesis/", } @phdthesis{Faisstnauer-thesis, title = "Priority Scheduling for Networked Virtual Environments", author = "Christian Faisstnauer", year = "2001", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2001/Faisstnauer-thesis/", } @phdthesis{Fuhrmann-thesis, title = "Studierstube: a Collaborative Virtual Environment for Scientific Visualization", author = "Anton Fuhrmann", year = "1999", pages = "149", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/1999/Fuhrmann-thesis/", } @phdthesis{Szalavari-thesis, title = "The Personal Interaction Panel - a two-handed Interface for Augmented Reality", author = "Zsolt Szalav\'{a}ri", year = "1999", pages = "137", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Virtuelle Realit\"{a}t, Schnittstelle", URL = "https://www.cg.tuwien.ac.at/research/publications/1999/Szalavari-thesis/", } @phdthesis{Hauser-thesis, title = "Visualization of Dynamic Systems", author = "Helwig Hauser", year = "1998", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/1998/Hauser-thesis/", } @phdthesis{Dimitrov-thesis, title = "Volume Visualization", author = "Leonid Dimitrov", year = "1998", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/1998/Dimitrov-thesis/", } @phdthesis{Tobler-thesis, title = "Global Illumination using Stochastic Radiosity for Constructive Solid Geometry", author = "Robert F. 
Tobler", year = "1997", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/1997/Tobler-thesis/", } @phdthesis{Matkovic-thesis, title = "Tone Mapping Techniques and Color Image Difference in Global Illumination", author = "Kresimir Matkovic", year = "1997", pages = "120", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/1997/Matkovic-thesis/", } @phdthesis{Traxler-thesis1, title = "Modeling and Realistic Rendering of Natural Scenes with Cyclic CSG-Graphs", author = "Christoph Traxler", year = "1997", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/1997/Traxler-thesis1/", } @phdthesis{Wegenkittl-thesis, title = "Visualization of Complex Dynamical Systems", author = "Rainer Wegenkittl", year = "1997", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/1997/Wegenkittl-thesis/", } @phdthesis{Schmalstieg-thesis, title = "The Remote Rendering Pipeline - Managing Geometry and Bandwidth in Distributed Virtual Environments", author = "Dieter Schmalstieg", year = "1997", pages = "146", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Dreidimensionale Computergrafik, Interaktion, Virtuelle Realit\"{a}t, Verteiltes System, Modell", URL = "https://www.cg.tuwien.ac.at/research/publications/1997/Schmalstieg-thesis/", } @phdthesis{Aram-thesis, title = "Der Vierphasen-Wegsuchalgorithmus zur Berechnung von Rohrleitungsf\"{u}hrungen im Anlagenbau", author = "Parham Aram", year = "1996", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/1996/Aram-thesis/", } @phdthesis{Sramek-thesis, title = "Visualization of Volumetric Data by Ray Tracing", author = "Milo\v{s} \v{S}r\'{a}mek", year = "1996", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/1996/Sramek-thesis/", } @phdthesis{Mazuryk-thesis, title = "Prediction Techniques", author = "Tomasz Mazuryk", year = "1996", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/1996/Mazuryk-thesis/", } @phdthesis{Ferschin-thesis, title = "A Two-Pass Radiosity Method for CSG Models", author = "Peter Ferschin", year = "1996", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/1996/Ferschin-thesis/", } @phdthesis{Tastl-thesis, title = "Psychophysikalische 
Studien zur empfindungsgem\"{a}{\ss}en Gleichf\"{o}rmigkeit des erweiterten CIELUV Farbsystems", author = "Ingeborg Tastl", year = "1995", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/1995/Tastl-thesis/", } @phdthesis{Feda-thesis, title = "Efficient Radiosity Algorithms", author = "Martin Feda", year = "1994", month = jun, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/1994/Feda-thesis/", } @phdthesis{Mueller-thesis, title = "Data Space Semantics", author = "Heimo M\"{u}ller", year = "1994", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/1994/Mueller-thesis/", } @phdthesis{Zeiller-thesis, title = "Collision Detection for Complex Objects in Computer Animation", author = "Michael Zeiller", year = "1994", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/1994/Zeiller-thesis/", } @phdthesis{Schrotta-thesis, title = "Collision Detection of Animated CT/NMR Joint Reconstruction", author = "Margit Schrotta", year = "1994", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/1994/Schrotta-thesis/", } @phdthesis{Haider-thesis, title = "Methoden des Farbmanagements", author = "Franz Haider", year = "1993", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/1993/Haider-thesis/", } @phdthesis{Hiess-thesis, title = "Ray Tracing Contour-Based-Solids", author = "Gerhard Hie{\ss}", year = "1992", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/1992/Hiess-thesis/", } @phdthesis{Bracher-thesis, title = "Modellsimulation von Atmosph\"{a}rischen Einfl\"{u}ssen auf horizontal aufgenommene photographische Bilder", author = "Roland Bracher", year = "1992", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/1992/Bracher-thesis/", } @phdthesis{Groeller-thesis, title = "Coherence in Computer Graphics", author = "Eduard Gr\"{o}ller", year = "1992", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/1992/Groeller-thesis/", } @phdthesis{benke-1991-comp, title = "Computeranimation f\"{u}r Diskrete Ereignissimulation", author = "Ch. 
Benke", year = "1991", month = may, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/1991/benke-1991-comp/", } @phdthesis{clauer-1989-eine, title = "Eine aufl\"{o}sungsunabh\"{a}ngige, farbinterpolierende Bilddarstellung und ihre Anwendung im Hochqualit\"{a}tsdruck", author = "Alexander Clauer", year = "1989", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/1989/clauer-1989-eine/", } @phdthesis{Gervautz-thesis, title = "Improvements of the Ray-tracing Algorithm", author = "Michael Gervautz", year = "1987", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/1987/Gervautz-thesis/", } @phdthesis{purgathofer-1984-iden, title = "Identifikation und verzerrende Skalierung f\"{u}r eine h\"{o}here graphische Programmiersprache", author = "Werner Purgathofer", year = "1984", month = may, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/1984/purgathofer-1984-iden/", } @phdthesis{dirnberger-1981-port, title = "Ein portables Mehrger\"{a}tesystem f\"{u}r die h\"{o}here graphische Programmiersprache PASCAL/Bild", author = "Josef Dirnberger", year = "1981", month = sep, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/1981/dirnberger-1981-port/", }