@mastersthesis{trautner-2018-imd, title = "Importance-Driven Exploration of Molecular Dynamics Simulations", author = "Thomas Trautner", year = "2018", abstract = "The aim of this thesis is a novel real-time visualization approach for exploring molecular dynamics (MD) simulations. Thanks to constantly improving hardware and ever-increasing computing power, MD simulations are more easily available. Additionally, they consist of hundreds, thousands or even millions of individual simulation frames and are getting more and more detailed. The calculation of such simulations is no longer limited by algorithms or hardware; nevertheless, it is still not possible to efficiently explore this huge amount of simulation data as an animated 3D visualization with ordinary, well-established visualization tools. Using current software tools, the exploration of such long simulations takes too much time, and due to the complexity of large molecular scenes, the visualizations suffer greatly from visual clutter. It is therefore very likely that the user will miss important events. We therefore designed a focus & context approach for MD simulations that guides the user to the most relevant temporal and spatial events, so that it is no longer necessary to explore the simulation in a linear fashion. Our contribution can be divided into the following four topics: 1. Spatial importance through different levels of detail. Depending on the type of research task, different geometrical representations can be selected for both focus and context elements. 2. Importance-driven visibility management through ghosting, to prevent context elements from occluding focus elements. 3. Temporal importance through adaptive fast-forward. The playback speed of the simulation thereby depends on a single importance function or a combination of multiple importance functions. 4. Visual decluttering of accumulated frames through motion blur, which additionally illustrates the playback speed-up. From the very beginning, this work was developed in close cooperation with biochemists from the Loschmidt Laboratories in Brno, Czech Republic. Together, we analyzed different use cases demonstrating the flexibility of our novel focus & context approach.", month = oct, pages = "100", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", keywords = "molecular dynamics simulation, real-time visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/trautner-2018-imd/", }
@bachelorsthesis{edlinger-2018-vwr, title = "Visually Linking web search results with bookmarked information", author = "Georg Edlinger", year = "2018", abstract = "This work presents a new approach to regaining access to stored information and to visualizing similarities between new information and locally stored data. The fact that bookmarks are cumbersome to use and that there is no possibility to compare web search results with local information motivates the concept of this thesis. The implementation was done as a Google Chrome extension and is based on the ‘Information Collage’ environment. In order to improve the perceived ease of use, the visualization was integrated into the search engine results page to avoid a context switch for the user. The visualization uses a word cloud to display similarities and differences between remote and local information.
The word cloud layout focuses on the spatial arrangement and the text colour of the words to encode their association with the remote or the local information.", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/edlinger-2018-vwr/", }
@inproceedings{steinboeck-2018-lbg, title = "Casual Visual Exploration of Large Bipartite Graphs Using Hierarchical Aggregation and Filtering", author = "Daniel Steinb\"{o}ck and Eduard Gr\"{o}ller and Manuela Waldner", year = "2018", abstract = "Bipartite graphs are typically visualized using linked lists or matrices. However, these classic visualization techniques do not scale well with the number of nodes. Biclustering has been used to aggregate edges, but not to create linked lists with thousands of nodes. In this paper, we present a new casual exploration interface for large, weighted bipartite graphs, which allows for multi-scale exploration through hierarchical aggregation of nodes and edges using biclustering in linked lists. We demonstrate the usefulness of the technique using two data sets: a database of media advertising expenses of public authorities and author-keyword co-occurrences from the IEEE Visualization Publication collection. Through an insight-based study with lay users, we show that the biclustering interface leads to longer exploration times, more insights, and more unexpected findings than a baseline interface using only filtering. However, users also perceive the biclustering interface as more complex.", month = oct, organization = "IEEE", location = "Konstanz, Germany", event = "4th International Symposium on Big Data Visual and Immersive Analytics", booktitle = "International Symposium on Big Data Visual and Immersive Analytics", keywords = "information visualization, bipartite graphs, biclustering, insight-based evaluation", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/steinboeck-2018-lbg/", }
@article{mazurek-2018-veq, title = "Visualizing Expanded Query Results", author = "Michael Mazurek and Manuela Waldner", year = "2018", abstract = "When performing queries in web search engines, users often face difficulties choosing appropriate query terms. Search engines therefore usually suggest a list of expanded versions of the user query to disambiguate it or to resolve potential term mismatches. However, it has been shown that users find it difficult to choose an expanded query from such a list. In this paper, we describe the adoption of set-based text visualization techniques to visualize how query expansions enrich the result space of a given user query and how the result sets relate to each other. Our system uses a linguistic approach to expand queries and topic modeling to extract the most informative terms from the results of these queries. In a user study, we compare a common text list of query expansion suggestions to three set-based text visualization techniques adopted for visualizing expanded query results – namely, Compact Euler Diagrams, Parallel Tag Clouds, and a List View – to resolve ambiguous queries using interactive query expansion. Our results show that text visualization techniques do not increase retrieval efficiency, precision, or recall. Overall, users rate Parallel Tag Clouds visualizing key terms of the expanded query space lowest.
Based on the results, we derive recommendations for visualizations of query expansion results and for text visualization techniques in general, and discuss alternative use cases of set-based text visualization techniques in the context of web search.", month = jun, journal = "Computer Graphics Forum", pages = "87--98", keywords = "Information visualization, search interfaces, empirical studies in visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/mazurek-2018-veq/", }
@mastersthesis{steinboeck-2017-vbn, title = "Interactive Visual Exploration Interface for Large Bipartite Networks", author = "Daniel Steinb\"{o}ck", year = "2018", abstract = "In this thesis, we introduce BiCFlows, a novel interactive visualization approach to explore large bipartite graphs. We were motivated by the Media Transparency Database, a public database established by the Austrian government to provide information about governmental advertising and subsidy expenses, which has the characteristics of a large, weighted bipartite graph. Current approaches that deal with the visualization of the Media Transparency Database are limited by the fact that they do not offer a sufficient overview of the whole dataset. Other existing approaches that are not specifically designed for the Media Transparency Database but deal with the visualization of bipartite graphs are additionally limited by their lack of scalability for large datasets. Aggregation is an often-used concept for reducing the amount of data by grouping together similar data objects. This only works if appropriate object properties are present in the data to base the aggregation on. If this additional information is missing, as in the Media Transparency Database, other aggregation techniques have to be used. Since we are dealing with bipartite graphs in our approach, we use the concept of biclustering to establish a hierarchical structure within the data that can be interactively explored by the user. We showed that BiCFlows can not only be used for the Media Transparency Database, but also for other datasets that share the characteristics of a weighted bipartite graph. Furthermore, we conducted an insight-based user study to compare BiCFlows with existing concepts and discussed advantages and drawbacks. We showed that BiCFlows supported users in their exploration process and let them gain more insights than existing approaches.", month = may, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/steinboeck-2017-vbn/", }
@mastersthesis{gusenbauer-2018, title = "Bitstream - A bottom-up/top-down hybrid approach for web-based visual analysis of big data", author = "Matthias Gusenbauer", year = "2018", abstract = "Analyzing large amounts of data is becoming an ever-increasing problem. Bitcoin, for example, has produced more data than it is possible to analyze. In order to compensate for these difficulties, creative ideas that employ data aggregation or minimization have been proposed. Other work also focuses on introducing novel visualization types that are geared towards the visualization of blockchain data. However, visualization of graphs through node-link diagrams remains a difficult challenge. Analysis of the Bitcoin transaction graph to follow bitcoin (BTC) transactions (TXs) poses a difficult problem due to the Bitcoin protocol and the amount of data.
This thesis combines two data processing strategies to visualize big network data on commodity hardware. The idea is to use visualization as a technique to analyze a data set containing Bitcoin transaction information. Criminals use Bitcoin as a means of payment because of its guaranteed pseudonymity. Through visualization we aim to identify patterns that will allow us to deanonymize transactions. To do so, we use a proxy server that preprocesses the data before it is visualized on a web client. The proxy leverages parallel computing to be able to do top-down and bottom-up data processing fast enough for interactive visualization. This is done through incremental loading (bottom-up), which makes it possible to visualize data immediately without a (pre-)processing delay. The database containing the public Bitcoin ledger is over 163 gigabytes in size. The resulting graph has more than 800 million nodes. As this is too much information to visualize, we also employ a top-down approach of data aggregation and minimization of the transaction graph. Through this methodology we intend to solve the performance problem of long processing delays and the problem of fractured data, where the data is shown only partially in the visualization. We collaborate with security experts who share their expertise through a continuously ongoing dialog. Exploratory analysis of a big data set such as the Bitcoin ledger, enabled through the methodology presented in this thesis, will help security experts analyze the money flow in a financial network that is used by criminals for its anonymity. We evaluate the result through the performance and feedback of these security experts, as well as by benchmarking the performance against current best-practice approaches.", month = may, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/gusenbauer-2018/", }
@talk{waldner-2018-ved, title = "Visual Data Exploration and Analysis in Emerging Display Environments", author = "Manuela Waldner", year = "2018", abstract = "Increasingly powerful computing and display hardware open up entirely new ways for visual data exploration and analysis. Powerful machines and emerging display environments facilitate novel visual exploration techniques, collaborative data analysis, and even immersion into the scientific data. This talk will address the challenges we faced when bringing biomolecular visual analysis tools and complex molecular visualizations into such large, multi-user environments. A special focus lies on interfaces and attention guidance techniques we designed and evaluated to keep the user oriented and reduce visual clutter.", month = apr, event = "Emerging Technologies in Scientific Data Visualisation - CECAM", location = "Pisa, Italy", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/waldner-2018-ved/", }
@bachelorsthesis{smiech-2018-tei, title = "Configurable Text Exploration Interface with NLP for Decision Support", author = "Martin Smiech", year = "2018", abstract = "Having to read and understand lots of text documents and reports on a daily basis can be quite challenging. The intended audience for these reports has limited resources and wants to reduce the time spent reading them. Therefore, a need emerges for a tool that assists in extracting relevant information from such reports and documents more quickly.
These text documents are often unstructured and of varying length. They are written in English and are available from different sources (such as RSS feeds and text files). The aim of this project is to offer a tool that supports the process of analysing and understanding given texts. This is made possible by using natural language processing (NLP) and text visualization (TextVis). TextVis is already a well-known and frequently used solution. The project described herein uses an NLP pipeline that serves as preprocessing for TextVis. To provide quick insight into the data, topic extraction mechanisms like Latent Dirichlet Allocation (LDA) or Non-negative Matrix Factorization (NMF) are available for the user to choose within the aforementioned pipeline. A major challenge for TextVis is the configuration of the NLP pipeline, because there are many different ways of doing so and a wide range of parameters to choose from. To overcome this issue, this project provides a solution that enables users to easily configure and customize their own NLP pipeline. It is designed to encourage these users to experiment with different sequences of NLP operations and parameter configurations to find a solution that suits them best. To keep the software easy to use, it is implemented entirely with web technologies and is accessible in a common web browser. The resulting visualization emphasizes particular parts of the text based on a set of selectable factors. These factors can be topics, sentiments and part-of-speech-tagged words. The focus of this work lies on a visual interface that enables and encourages users to adjust and optimize the underlying NLP pipeline (by selecting steps and setting parameters) and to compare the results. Evaluation with the help of user feedback showed that certain pipeline configurations work better for certain types of texts than others. Using the solution created within this work, users can adapt the tool to their needs and also tweak it according to their requirements. There is no universal configuration that works for all documents, however.", month = apr, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/smiech-2018-tei/", }
@bachelorsthesis{cizmic-2018-evd, title = "Exploratory Data Visualization Dashboard for Technical Analysis of Commodity Market Indicators", author = "Dea Cizmic", year = "2018", abstract = "Companies and traders working in the commodity market encounter a variety of different data sets, including numerous economic indicators. The analysis of those indicators and their connection to certain markets can lead to important insights. The understanding of the market can be improved, and predictions of future market development can be made. However, dozens of economic indicators exist, and one of the main challenges is to provide a clear overview of the indicators and identify those which correlate with a certain market. Software tools are often utilised to perform the analysis of financial markets; however, according to domain experts, they often hit the limits of human perception. This thesis focuses on the development of a prototypical web application dashboard, which enables the user to analyse the relation between a defined commodity market and different economic indicators.
Besides the relation between a single indicator and a given market, the possibility to interactively create one’s own composite indicator for comparison with the given market is implemented. The process of creating a composite indicator is another challenge, as it requires numerous decisions to be made. The dashboard therefore offers a platform for exploring different composite indicator configurations. Moreover, the web application also provides visualization and interaction techniques, such as highlighting, brushing and details-on-demand, to enhance the comparison process and amplify human cognition.", month = apr, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/cizmic-2018-evd/", }
@article{polatsek-2018-stv, title = "Exploring visual attention and saliency modeling for task-based visual analysis", author = "Patrik Polatsek and Manuela Waldner and Ivan Viola and Peter Kapec and Wanda Benesova", year = "2018", abstract = "Memory, visual attention and perception play a critical role in the design of visualizations. The way users observe a visualization is affected by salient stimuli in a scene as well as by domain knowledge, interest, and the task. While recent saliency models manage to predict the users’ visual attention in visualizations during exploratory analysis, there is little evidence of how much influence bottom-up saliency has on task-based visual analysis. Therefore, we performed an eye-tracking study with 47 users to determine the users’ path of attention when solving three low-level analytical tasks using 30 different charts from the MASSVIS database [1]. We also compared our task-based eye tracking data to the data from the original memorability experiment by Borkin et al. [2]. We found that solving a task leads to more consistent viewing patterns compared to exploratory visual analysis. However, bottom-up saliency of a visualization has negligible influence on users’ fixations and task efficiency when performing a low-level analytical task. Also, the efficiency of visual search for an extreme target data point is barely influenced by the target’s bottom-up saliency. Therefore, we conclude that bottom-up saliency models tailored towards information visualization are not suitable for predicting visual attention when performing task-based visual analysis. We discuss potential reasons and suggest extensions to visual attention models to better account for task-based visual analysis.", month = feb, doi = "10.1016/j.cag.2018.01.010", journal = "Computers & Graphics", number = "2", keywords = "Information visualization, Eye-tracking experiment, Saliency, Visual attention, Low-level analytical tasks", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/polatsek-2018-stv/", }
@talk{Waldner_2017_11, title = "Guiding Attention in Complex Visualizations using Flicker", author = "Manuela Waldner", year = "2017", abstract = "Drawing the user’s gaze to an important item in an image or a graphical user interface is a common challenge. Usually, some form of highlighting is used, such as a clearly distinct color or a border around the item. Flicker is also a strong visual attractor in the entire visual field, without distorting, suppressing, or adding any scene elements. While it is very salient, it is often perceived as annoying.
In this talk, I will present our research on how flicker can be used as an attention guidance technique in cluttered visualizations while lowering its negative side effects. In particular, I will first present results of studies examining a two-stage flicker technique for dynamic visualizations on large displays. Then, I will present our explorations of high frequency flicker (60 to 72 Hz) to guide the user’s attention in images. At such high frequencies, the critical flicker frequency (CFF) threshold is reached, which makes the flicker appear to fuse into a stable signal. However, the CFF is not uniform across the visual field, but is higher in the peripheral vision at normal lighting conditions. We show that high frequency flicker, using personalized attributes like patch size and luminance, can be easily detected by observers in the peripheral vision, but the signal is hardly visible in the foveal vision when users directly look at the flickering patch. We demonstrate that this property can be used to draw the user’s attention to important image regions using a standard high refresh-rate computer monitor with minimal visible modifications to the image.", month = nov, event = "S&T Cooperation Austria-Czech Republic", location = "Czech Technical University", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/Waldner_2017_11/", }
@inproceedings{geymayer-2017-std, title = "How Sensemaking Tools Influence Display Space Usage", author = "Thomas Geymayer and Manuela Waldner and Alexander Lex and Dieter Schmalstieg", year = "2017", abstract = "We explore how the availability of a sensemaking tool influences users’ knowledge externalization strategies. On a large display, users were asked to solve an intelligence analysis task with or without a bidirectionally linked concept-graph (BLC) to organize insights into concepts (nodes) and relations (edges). In BLC, both nodes and edges maintain “deep links” to the exact source phrases and sections in associated documents. In our control condition, we were able to reproduce previously described spatial organization behaviors using document windows on the large display. When using BLC, however, we found that analysts apply spatial organization to BLC nodes instead, use significantly less display space and have significantly fewer open windows.", month = jun, event = "EuroVis 2017", booktitle = "EuroVis Workshop on Visual Analytics", keywords = "sensemaking, large displays, evaluation", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/geymayer-2017-std/", }
@inproceedings{waldner-2017-vph, title = "Exploring Visual Prominence of Multi-Channel Highlighting in Visualizations", author = "Manuela Waldner and Alexey Karimov and Eduard Gr\"{o}ller", year = "2017", abstract = "Visualizations make rich use of multiple visual channels so that there are few resources left to make selected focus elements visually distinct from their surrounding context. A large variety of highlighting techniques for visualizations has been presented in the past, but there has been little systematic evaluation of the design space of highlighting. We explore highlighting from the perspective of visual marks and channels – the basic building blocks of visualizations that are directly controlled by visualization designers.
We present the results from two experiments, exploring the visual prominence of highlighted marks in scatterplots: First, using luminance as a single highlight channel, we found that visual prominence is mainly determined by the luminance difference between the focus mark and the brightest context mark. The brightness differences between context marks and the overall brightness level have negligible influence. Second, multi-channel highlighting using luminance and blur leads to a good trade-off between highlight effectiveness and aesthetics. From the results, we derive a simple highlight model to balance highlighting across multiple visual channels and focus and context marks, respectively.", month = may, booktitle = "Spring Conference on Computer Graphics 2017", keywords = "information visualization, highlighting, focus+context, visual prominence", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/waldner-2017-vph/", }
@article{Waldin_Nicholas_2017_FlickerObserver, title = "Flicker Observer Effect: Guiding Attention Through High Frequency Flicker in Images", author = "Nicholas Waldin and Manuela Waldner and Ivan Viola", year = "2017", abstract = "Drawing the user's gaze to an important item in an image or a graphical user interface is a common challenge. Usually, some form of highlighting is used, such as a clearly distinct color or a border around the item. Flicker can also be very salient, but is often perceived as annoying. In this paper, we explore high frequency flicker (60 to 72 Hz) to guide the user's attention in an image. At such high frequencies, the critical flicker frequency (CFF) threshold is reached, which makes the flicker appear to fuse into a stable signal. However, the CFF is not uniform across the visual field, but is higher in the peripheral vision at normal lighting conditions. Through experiments, we show that high frequency flicker can be easily detected by observers in the peripheral vision, but the signal is hardly visible in the foveal vision when users directly look at the flickering patch. We demonstrate that this property can be used to draw the user's attention to important image regions using a standard high refresh-rate computer monitor with minimal visible modifications to the image. In an uncalibrated visual search task, users could easily spot the specified search targets flickering with very high frequency in a crowded image. They also reported that high frequency flicker was distracting when they had to attend to another region, while it was hardly noticeable when looking at the flickering region itself.", month = may, journal = "Computer Graphics Forum", volume = "36", number = "2", pages = "467--476", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/Waldin_Nicholas_2017_FlickerObserver/", }
@bachelorsthesis{Koszticsak-2017-ewt, title = "Generating Expressive Window Thumbnails through Seam Carving", author = "Rebeka Koszticsak", year = "2017", abstract = "Thumbnails are used to display lists of open windows or tabs when switching between them on computers and on mobile devices. These images make it easier to recognize the open applications and help to find the needed window more quickly. Thumbnails, however, only display a screenshot of the windows, so they potentially get confusing if there are many open windows or if the same application is opened multiple times. Depending on the resolution of the display, the screenshot size decreases as the number of open windows increases.
Furthermore, within the same application (like MS Office Word) the screenshots are similar in appearance (e.g., white paper and toolbar), but the important text is not readable. There are several approaches that filter the important areas of images to enhance the main region. In this bachelor thesis, an application is implemented that applies such methods to screenshots. Screenshots of windows are reduced by cropping the irrelevant elements of the margin area using seam carving, i.e., by eliminating unimportant pixel paths, and by common down-sampling. As a result, the thumbnails show only relevant information, which makes them more expressive and better able to fulfill their purpose.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/Koszticsak-2017-ewt/", }
@studentproject{mazurek-2017-vows, title = "Visualization of Thesaurus-Based Web Search", author = "Michael Mazurek", year = "2017", abstract = "The general functions of current web search engines are well established. A box is provided in which to type the queries, and the engine returns a result list which users can evaluate. Autocomplete suggestions assist users in defining their problems; however, there is a lack of support for iterative manual refinement of the query. This additional aid can be beneficial when users do not know the exact terms to describe the concept they are looking for. Therefore, the goal of this project is to show searchers how a slight variation of the query changes the results. With this information, they can then perform a targeted refinement of the query to access useful information sources. To achieve this goal, each part of the searcher’s query is varied with a thesaurus that provides synonyms for the individual query terms. While performing the user’s original query in a normal fashion, variations of this query are conducted in the background. To provide a concise visual summary of the query results, text mining techniques are applied to all gathered results to retrieve the most important key terms for each query variation. This procedure results in a visual overview of what the searcher’s query finds together with what could be found with a slight variation of the query. Additional queries should make users aware that alternative queries may be more appropriate when their original query is poorly formulated. In conjunction with some interaction tools, the goal is to reduce the burden of refining search queries and thereby make searching the web less complex.", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/mazurek-2017-vows/", }
@bachelorsthesis{mazurek-2017-sio, title = "Stream I/O - An Interactive Visualization of Publication Data", author = "Michael Mazurek", year = "2017", abstract = "The publication database of the Institute of Computer Graphics and Algorithms can currently be queried by a simple UI which returns a list. Stream I/O, the application developed in this thesis, extends the interface to improve it in terms of overview, exploration and analysis support. To cope with these shortcomings, a visualization is added to the user interface. As the publication database includes many additional data attributes, a selection of attributes is used for the visualization to give further insight.
By using the Streamgraph [BW08] visualization, the variations over time within attributes like authors, publication type and research areas are made visible. The focus of this visualization lies in showing individual attribute values while also conveying the sum. This relationship is depicted in a timeline, which allows a user to explore the past and current work of the institute as well as to find relationships and trends in the publications. As the visualization uses a timeline encoding, the directed flow from left to right is interpreted as the movement through time. It shows the evolution of different attributes, while the occurrence of a topic at a specific time is encoded by the width of the layer at that point. Searching the database is enriched through multiple viewpoints, which give the user insight into how attributes relate in the underlying data and how the data changes through his/her manipulation. Selections of colored layers within the graph can represent bigger trends and give insight into the data as a whole. The Stream I/O application invites users to interactively explore the publication database, while simultaneously gaining new insight through the visualization.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/mazurek-2017-sio/", }
@studentproject{steinboeck-2017-vefp, title = "Visualization of EU Funding Programmes", author = "Daniel Steinb\"{o}ck", year = "2017", abstract = "To fund research and technological development, not only in Europe but all over the world, the European Union created so-called Framework Programmes. The data of these programmes, containing information about projects, corresponding topics, funding sums, funding periods and recipient countries, is publicly available, but hard to analyze without visual support. Therefore, a multiple coordinated view approach is developed in the course of this project. The different visualization techniques used, such as bar charts, a treemap, a choropleth map and a line graph, make it possible to filter, analyze and further explore the available data through brushing and linking. The project was developed in close collaboration with the end users of the Centre for Social Innovation and received overall positive feedback from them.", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/steinboeck-2017-vefp/", }
@bachelorsthesis{donabauer-2015-asc, title = "Advanced Screen Capturing", author = "Johanna Donabauer", year = "2016", abstract = "Creating a free-form screen shot that captures exactly what is needed is not a trivial task. Moving the mouse cursor around the desired region to indicate the boundary of the screen snippet often yields jagged outlines in the result image. Furthermore, it is not easy to fully exclude unnecessary screen elements during the selection process. As a consequence, the created screen shot may contain small portions of surrounding image elements which are unwanted in the result image, or some areas may be accidentally truncated. To overcome these limitations, Advanced Screen Capturing combines screen capture functionalities with image processing methods as an alternative way of creating free-form screen shots. The selection of the desired screen region is done by roughly selecting the needed image area.
After extracting the highlighted screen region, a stroke mask is calculated to detect the image elements surrounded by the drawn stroke. This stroke mask forms the basis for rejecting or accepting partly crossed image regions or truncating large ones. The image regions to accept or reject are detected as contours. By combining the stroke mask and the detected contours, the segmentation mask is generated, in which image regions are either fully included, fully rejected, or cut, depending on how much they overlap with the stroke mask. Based on the segmentation mask, the original captured screen shot is segmented. In the end, the calculated result image contains all user-defined relevant image information and adapted image boundaries. The introduced screen capture and contour-based segmentation algorithm works best for screen elements like text, charts, different kinds of shapes and combinations of these. Advanced Screen Capturing can be used as a library for integration into other systems or as a stand-alone desktop application. It was implemented using C++, the Qt framework and the OpenCV library.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/donabauer-2015-asc/", }
@inproceedings{Waldin_Nicholas_2016_Chameleon, title = "Chameleon Dynamic Color Mapping for Multi-Scale Structural Biology Models", author = "Nicholas Waldin and Mathieu Le Muzic and Manuela Waldner and Eduard Gr\"{o}ller and David Goodsell and Ludovic Autin and Ivan Viola", year = "2016", abstract = "Visualization of structural biology data uses color to categorize or separate dense structures into particular semantic units. In multiscale models of viruses or bacteria, there are atoms on the finest level of detail, then amino acids, secondary structures, macromolecules, up to the compartment level, and on all these levels, elements can be visually distinguished by color. However, currently only single-scale coloring schemes are utilized that show information for one particular scale only. We present a novel technology which adaptively, based on the current scale level, adjusts the color scheme to depict or distinguish the currently best visible structural information. We treat color as a visual resource that is distributed given a particular demand. Changes of the color scheme are seamlessly interpolated from the color scheme of the previous view into the new one. With such dynamic multi-scale color mapping we ensure that the viewer is able to distinguish structural detail that is shown on any given scale. This technique has been tested by users with expertise in structural biology and has been well received overall.", event = "VCBM", booktitle = "Eurographics Workshop on Visual Computing for Biology and Medicine", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Waldin_Nicholas_2016_Chameleon/", }