@article{haidacher-2011-VAM,
  title    = {Volume Analysis Using Multimodal Surface Similarity},
  author   = {Haidacher, Martin and Bruckner, Stefan and Gr{\"o}ller, Eduard},
  year     = {2011},
  abstract = {The combination of volume data acquired by multiple
              modalities has been recognized as an important but
              challenging task. Modalities often differ in the structures
              they can delineate and their joint information can be used
              to extend the classification space. However, they frequently
              exhibit differing types of artifacts which makes the process
              of exploiting the additional information non-trivial. In
              this paper, we present a framework based on an
              information-theoretic measure of isosurface similarity
              between different modalities to overcome these problems. The
              resulting similarity space provides a concise overview of
              the differences between the two modalities, and also serves
              as the basis for an improved selection of features.
              Multimodal classification is expressed in terms of
              similarities and dissimilarities between the isosurfaces of
              individual modalities, instead of data value combinations.
              We demonstrate that our approach can be used to robustly
              extract features in applications such as dual energy
              computed tomography of parts in industrial manufacturing.},
  month    = oct,
  journal  = {IEEE Transactions on Visualization and Computer Graphics},
  volume   = {17},
  number   = {12},
  pages    = {1969--1978},
  keywords = {surface similarity, volume visualization, multimodal data},
  url      = {https://www.cg.tuwien.ac.at/research/publications/2011/haidacher-2011-VAM/},
}
@phdthesis{haidacher-2011-phd,
  title    = {Information-based Feature Enhancement in Scientific
              Visualization},
  author   = {Haidacher, Martin},
  year     = {2011},
  abstract = {Scientific visualization is a research area which gives
              insight into volumetric data acquired through measurement or
              simulation. The visualization allows a faster and more
              intuitive exploration of the data. Due to the rapid
              development in hardware for the measurement and simulation
              of scientific data, the size and complexity of data is
              constantly increasing. This has the benefit that it is
              possible to get a more accurate insight into the measured or
              simulated phenomena. A drawback of the increasing data size
              and complexity is the problem of generating an expressive
              representation of the data. Since only certain parts of the
              data are necessary to make a decision, it is possible to
              mask parts of the data along the visualization pipeline to
              enhance only those parts which are important in the
              visualization. For the masking various properties are
              extracted from the data which are used to classify a part as
              important or not. In general a transfer function is used for
              this classification process which has to be designed by the
              user. In this thesis three novel approaches are presented
              which use methods from information theory and statistics to
              enhance features from the data in the classification process
              that are important for a certain task. With the tools of
              information theory and statistics it is possible to extract
              properties from the data which are able to classify
              different materials or tissues in the data better than
              comparable other approaches. One approach adaptively
              extracts statistical properties, i.e. the mean value and the
              standard deviation, of the data values in the local
              neighborhood of each point in the data set. With these
              statistical properties it is possible to better distinguish
              between different materials in a data set even though the
              data is very noisy. The other two approaches in this thesis
              employ methods from information theory to extract features
              from multimodal data sets. Thus it is possible to enhance
              features of the data which are either very similar or very
              dissimilar in both modalities. Through information theory
              the variations in the value ranges of both modalities do not
              influence the classification of these features. All three
              approaches define novel transfer-function spaces which
              simplify the design process of a transfer function for the
              user. Different features of the data, such as different
              materials, can be clearly depicted in these spaces.
              Therefore, it is easier for a user to design a transfer
              function which enhances the features of importance for a
              certain task. For each of the new approaches results and
              comparisons to other existing techniques are shown to
              highlight the usefulness of the proposed methods. Through
              the described research it is shown that information theory
              and statistics are tools which are able to extract
              expressive properties from the data. In the introduction a
              broad overview over scientific visualization and the
              visualization pipeline is given. The classification process
              is described in more detail. Since information theory and
              statistics play an important role for all three approaches,
              a brief introduction to these concepts is given as well.},
  address  = {Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria},
  school   = {Institute of Computer Graphics and Algorithms, Vienna
              University of Technology},
  keywords = {Scientific visualization, Information theory, Volume
              classification},
  url      = {https://www.cg.tuwien.ac.at/research/publications/2011/haidacher-2011-phd/},
}
@inproceedings{haidacher_2010_statTF,
  title     = {Volume Visualization based on Statistical Transfer-Function
               Spaces},
  author    = {Haidacher, Martin and Patel, Daniel and Bruckner, Stefan and
               Kanitsar, Armin and Gr{\"o}ller, Eduard},
  year      = {2010},
  abstract  = {It is a difficult task to design transfer functions for
               noisy data. In traditional transfer-function spaces, data
               values of different materials overlap. In this paper we
               introduce a novel statistical transfer-function space which
               in the presence of noise, separates different materials in
               volume data sets. Our method adaptively estimates
               statistical properties, i.e. the mean value and the standard
               deviation, of the data values in the neighborhood of each
               sample point. These properties are used to define a
               transfer-function space which enables the distinction of
               different materials. Additionally, we present a novel
               approach for interacting with our new transfer-function
               space which enables the design of transfer functions based
               on statistical properties. Furthermore, we demonstrate that
               statistical information can be applied to enhance visual
               appearance in the rendering process. We compare the new
               method with 1D, 2D, and LH transfer functions to demonstrate
               its usefulness.},
  month     = mar,
  booktitle = {Proceedings of the IEEE Pacific Visualization 2010},
  pages     = {17--24},
  keywords  = {transfer function, statistics, shading, noisy data,
               classification},
  url       = {https://www.cg.tuwien.ac.at/research/publications/2010/haidacher_2010_statTF/},
}
@inproceedings{patel_2009_MC,
  title     = {Moment Curves},
  author    = {Patel, Daniel and Haidacher, Martin and Balabanian, Jean-Paul
               and Gr{\"o}ller, Eduard},
  year      = {2009},
  abstract  = {We define a transfer function based on the first and second
               statistical moments. We consider the evolution of the mean
               and variance with respect to a growing neighborhood around a
               voxel. This evolution defines a curve in 3D for which we
               identify important trends and project it back to 2D. The
               resulting 2D projection can be brushed for easy and robust
               classification of materials and material borders. The
               transfer function is applied to both CT and MR data.},
  month     = apr,
  isbn      = {978-1-4244-4404-5},
  location  = {Beijing, China},
  editor    = {Eades, Peter and Ertl, Thomas and Shen, Han-Wei},
  booktitle = {Proceedings of the IEEE Pacific Visualization Symposium 2009},
  pages     = {201--208},
  keywords  = {Statistical Moments, Volume Classification, Statistics},
  url       = {https://www.cg.tuwien.ac.at/research/publications/2009/patel_2009_MC/},
}
@inproceedings{haidacher-2008-vcbm,
  title     = {Information-based Transfer Functions for Multimodal
               Visualization},
  author    = {Haidacher, Martin and Bruckner, Stefan and Kanitsar, Armin
               and Gr{\"o}ller, Eduard},
  year      = {2008},
  abstract  = {Transfer functions are an essential part of volume
               visualization. In multimodal visualization at least two
               values exist at every sample point. Additionally, other
               parameters, such as gradient magnitude, are often retrieved
               for each sample point. To find a good transfer function for
               this high number of parameters is challenging because of the
               complexity of this task. In this paper we present a general
               information-based approach for transfer function design in
               multimodal visualization which is independent of the used
               modality types. Based on information theory, the complex
               multi-dimensional transfer function space is fused to allow
               utilization of a well-known 2D transfer function with a
               single value and gradient magnitude as parameters.
               Additionally, a quantity is introduced which enables better
               separation of regions with complementary information. The
               benefit of the new method in contrast to other techniques is
               a transfer function space which is easy to understand and
               which provides a better separation of different tissues. The
               usability of the new approach is shown on examples of
               different modalities.},
  month     = oct,
  isbn      = {978-3-905674-13-2},
  publisher = {Eurographics Association},
  location  = {Delft},
  issn      = {2070-5778},
  editor    = {Botha, C. P. and Kindlmann, G. and Niessen, W. J. and
               Preim, B.},
  booktitle = {VCBM},
  pages     = {101--108},
  keywords  = {Multimodal Visualization, Transfer Function, Information
               Theory},
  url       = {https://www.cg.tuwien.ac.at/research/publications/2008/haidacher-2008-vcbm/},
}
@mastersthesis{haidacher-2007-idr,
  title    = {Importance-Driven Rendering in Interventional Imaging},
  author   = {Haidacher, Martin},
  year     = {2007},
  abstract = {In this thesis a combined visualization of dense clinical
              data like 3D CTA (Computed Tomography Angiography) combined
              with co-registered real-time images of medical intervention
              applications is presented. The main challenge here is to
              provide a merged visualization that allows sufficient spatial
              perception of the important parts, as derived from the
              pre-operative data, while not occluding the information in
              the real-time image embedded within the volume. This work
              presents a new approach of importance definition for
              volumetric data and how this importance can be used to
              create a feature-emphasized visualization. Furthermore the
              viewpoint and the position of the intervention image is used
              to generate a contextual cutaway which influences the density
              of the visualization to avoid an occlusion of the real-time
              image by less important parts of the volumetric data.},
  month    = aug,
  address  = {Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria},
  school   = {Institute of Computer Graphics and Algorithms, Vienna
              University of Technology},
  url      = {https://www.cg.tuwien.ac.at/research/publications/2007/haidacher-2007-idr/},
}
@inproceedings{burns-2007-fea,
  title     = {Feature Emphasis and Contextual Cutaways for Multimodal
               Medical Visualization},
  author    = {Burns, Michael and Haidacher, Martin and Wein, Wolfgang and
               Viola, Ivan and Gr{\"o}ller, Eduard},
  year      = {2007},
  abstract  = {Dense clinical data like 3D Computed Tomography (CT) scans
               can be visualized together with real-time imaging for a
               number of medical intervention applications. However, it is
               difficult to provide a fused visualization that allows
               sufficient spatial perception of the anatomy of interest, as
               derived from the rich pre-operative scan, while not
               occluding the real-time image displayed embedded within the
               volume. We propose an importance-driven approach that
               presents the embedded data such that it is clearly visible
               along with its spatial relation to the surrounding
               volumetric material. To support this, we present and
               integrate novel techniques for importance specification,
               feature emphasis, and contextual cutaway generation. We show
               results in a clinical context where a pre-operative CT scan
               is visualized alongside a tracked ultrasound image, such
               that the important vasculature is depicted between the
               viewpoint and the ultrasound image, while a more opaque
               representation of the anatomy is exposed in the surrounding
               area.},
  month     = may,
  isbn      = {9783905673456},
  publisher = {IEEE},
  location  = {Norrk{\"o}ping, Sweden},
  editor    = {Museth, K. and M{\"o}ller, T. and Ynnerman, A.},
  booktitle = {Proceedings of Eurographics / IEEE VGTC Symposium on
               Visualization (EuroVis 2007)},
  pages     = {275--282},
  url       = {https://www.cg.tuwien.ac.at/research/publications/2007/burns-2007-fea/},
}
@studentproject{haidacher-2005-MND,
  title    = {Multiscale Nodule Detection in {CT} Data},
  author   = {Haidacher, Martin},
  year     = {2005},
  abstract = {In this paper I am describing a computer aided detection
              (CAD) method, which is able to detect lung nodules in
              medical data sets. The data sets are obtained by a high
              resolution computer tomography. The goal of the nodule
              detection is to gain an early nodule detection which
              increases the probability of survival. Introduced method is
              able to detect nodules of variable size and variable shape.
              It is also rotation-invariant. The detection algorithm is
              based on the Hessian matrix. This matrix consists of the
              second-order partial derivatives. The eigenvalues of this
              matrix are used to determine the probability of a
              nodule-like shape. This method is well adapted to detect
              nodules of a size larger than 4 mm diameter. Tests with
              synthetic nodule data sets and some real data sets provided
              a high probability of true nodule detection with a very low
              number of false positives per data set.},
  url      = {https://www.cg.tuwien.ac.at/research/publications/2005/haidacher-2005-MND/},
}
@studentproject{haidacher-2004-FVV,
  title    = {Merkmals-Visualisierung f{\"u}r Volumsdaten},
  author   = {Haidacher, Martin},
  year     = {2004},
  abstract = {In diesem Paper werden Methoden vorgestellt, welche das
              Direct Volume Rendering (DVR) unterst{\"u}tzen. Sie sollen vor
              allem dazu dienen, innere Strukturen und wichtige Details
              besser darzustellen. Die Methode Depth of Field adaptiert
              den Effekt der Tiefenunsch{\"a}rfe aus der Fotographie f{\"u}r die
              Volumsvisualisierung. Der Fokusbereich wird dabei scharf
              dargestellt und je nach Entfernung in Blickrichtung vom
              Fokuspunkt und Linsenst{\"a}rke, wird der jeweilige Bereich
              unsch{\"a}rfer dargestellt. Die Methode f{\"u}hrt dazu, dass der
              Benutzer schneller auf die wichtigen Teile des Datensatzes
              hingewiesen wird. Die weiteren Methoden besch{\"a}ftigen sich
              mit der Darstellung der inneren Struktur von Objekten. Die
              Methode des Focus+Context teilt dabei den Datensatz in einen
              Fokus und einen Kontextbereich. Objektteile, welche im
              Fokusbereich liegen, werden mittels DVR und den
              dazugeh{\"o}rigen Transferfunktionen gerendert. Der Rest wird
              mit einer nicht photorealistischen Methode gerendert, welche
              nur die Umrisse der Objekte darstellt. Bei 3D Dithering wird
              der Inhalt von gleichm{\"a}{\ss}ig verteilten W{\"u}rfeln mit den
              vorgegebenen Transferfunktionen gerendert und vom dazwischen
              liegendem Rest werden nur die Konturen oder gar nichts
              dargestellt. Die letzte Methode, Magic Lamp, rendert Teile
              des Objektes mit DVR und den normalen Transferfunktionen nur
              dann, wenn deren Gradient nahezu parallel zu einer
              einstellbaren Richtung ist. Die restlichen Teile des
              Objekts werden wieder nur durch ihre
              Konturen dargestellt.
              http://www.cg.tuwien.ac.at/courses/projekte_old/vis/finished/MHaidacher/index.html},
  url      = {https://www.cg.tuwien.ac.at/research/publications/2004/haidacher-2004-FVV/},
}