[
    {
        "id": "kraehsmaier-dsc",
        "type_id": "bachelorthesis",
        "tu_id": null,
        "repositum_id": null,
        "title": "Detection, Segmentation and Classification of Scarves",
        "date": "2025-08-21",
        "abstract": null,
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "picture-teaser",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 572,
            "image_height": 697,
            "name": "kraehsmaier-dsc-picture-teaser.png",
            "type": "image/png",
            "size": 609170,
            "path": "Publication:kraehsmaier-dsc",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2025/kraehsmaier-dsc/kraehsmaier-dsc-picture-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2025/kraehsmaier-dsc/kraehsmaier-dsc-picture-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            5495
        ],
        "date_end": "2025-08-21",
        "date_start": "2024-10-01",
        "matrikelnr": "12122535",
        "supervisor": [
            166
        ],
        "research_areas": [
            "Modeling",
            "Perception"
        ],
        "keywords": [],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "picture-teaser",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 572,
                "image_height": 697,
                "name": "kraehsmaier-dsc-picture-teaser.png",
                "type": "image/png",
                "size": 609170,
                "path": "Publication:kraehsmaier-dsc",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2025/kraehsmaier-dsc/kraehsmaier-dsc-picture-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2025/kraehsmaier-dsc/kraehsmaier-dsc-picture-teaser:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "thesis",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "kraehsmaier-dsc-thesis.pdf",
                "type": "application/pdf",
                "size": 18617457,
                "path": "Publication:kraehsmaier-dsc",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2025/kraehsmaier-dsc/kraehsmaier-dsc-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2025/kraehsmaier-dsc/kraehsmaier-dsc-thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2025/kraehsmaier-dsc/",
        "__class": "Publication"
    },
    {
        "id": "cardoso-thesis",
        "type_id": "phdthesis",
        "tu_id": null,
        "repositum_id": "20.500.12708/209309",
        "title": "Approaching Under-Explored Image-Space Problems with Optimization",
        "date": "2024-12-19",
        "abstract": "This doctoral dissertation delves into three distinct yet interconnected problems in the realm of interactive image-space computing in computer graphics, each of which has not been tackled by existing literature.The first problem centers on the prediction of visual error metrics in real-time applications, specifically in the context of content-adaptive shading and shading reuse. Utilizing convolutional neural networks, this research aims to estimate visual errors without requiring reference or rendered images. The models developed can account for 70%–90% of the variance and achieve computation times that are an order of magnitude faster than existing methods. This enables a balance between resource-saving and visual quality, particularly in deferred shading pipelines, and can achieve up to twice the performance compared to state-of-the-art methods depending on the portion of unseen image regions. The second problem focuses on the burgeoning field of light-field cameras and the challenges associated with depth prediction. This research argues for the refinement of cost volumes rather than depth maps to increase the accuracy of depth predictions. A set of cost-volume refinement algorithms is proposed, which dynamically operate at runtime to find optimal solutions, thereby enhancing the accuracy and reliability of depth estimation in light fields.The third problem tackles the labor-intensive nature of hand-drawn animation, specifically in the detailing of character eyes. An unsupervised network is introduced that blends inpainting and image-to-image translation techniques. This network employs a novel style-aware clustering method and a dual-discriminator optimization strategy with a triple-reconstruction loss. 
The result is an improvement in the level of detail and artistic consistency in hand-drawn animation, preferred over existing work 95.16% of the time according to a user study.Optimization techniques are the common thread that ties these problems together. While dynamic optimization at runtime is employed for cost volume refinement, deep-learning methods are used offline to train global solutions for the other two problems. This research not only fills gaps in the existing literature but also paves the way for future explorations in the field of computer graphics and optimization, offering new avenues for both academic research and practical applications.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": "date",
        "repositum_presentation_id": null,
        "authors": [
            1639
        ],
        "ac_number": "AC17414787",
        "date_end": "2024-12-19",
        "date_start": "2019-04",
        "doi": "10.34726/hss.2025.128664",
        "matrikelnr": "11937133",
        "open_access": "yes",
        "pages": "110",
        "reviewer_1": [
            1825
        ],
        "reviewer_2": [
            5420
        ],
        "rigorosum": "2024-12-19",
        "supervisor": [
            193
        ],
        "research_areas": [
            "Perception",
            "Rendering"
        ],
        "keywords": [
            "variable-rate shading",
            "light-fields",
            "limited animation",
            "anime",
            "convolutional neural networks"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "thesis",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "cardoso-thesis-thesis.pdf",
                "type": "application/pdf",
                "size": 47447576,
                "path": "Publication:cardoso-thesis",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/cardoso-thesis/cardoso-thesis-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/cardoso-thesis/cardoso-thesis-thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "EVOCATION"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/cardoso-thesis/",
        "__class": "Publication"
    },
    {
        "id": "thesis-kronsteiner",
        "type_id": "bachelorthesis",
        "tu_id": null,
        "repositum_id": null,
        "title": "Einfluss des Seitenverhältnisses auf Parallele Koordinaten",
        "date": "2024-10-10",
        "abstract": "Parallel coordinates are a unique visualization technique that presents promising opportunities for the visualization of large and diverse multivariate datasets. Applications\nsuch as web-based visualizations and dashboards are common use cases for this type\nof data. Prevalent concepts in the modern web are responsive design - the ability of\na web page to fit any screen resolution - as well as interactivity and customizability,\nrequiring us to consider the role of aspect ratio in the design of visual displays. We\nimplemented a web-based tool and conducted a statistical analysis of angle parameters\nin parallel coordinates plots. Our results indicate a significant influence of aspect ratio\non the display of parallel coordinates, and show that landscape orientations are more\nconsistent across different aspect ratios than portrait orientations.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1159,
            "image_height": 814,
            "name": "thesis-kronsteiner-image.jpeg",
            "type": "image/jpeg",
            "size": 400170,
            "path": "Publication:thesis-kronsteiner",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/thesis-kronsteiner/thesis-kronsteiner-image.jpeg",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/thesis-kronsteiner/thesis-kronsteiner-image:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            5398
        ],
        "date_end": "2024-10-10",
        "date_start": "2024-04-10",
        "matrikelnr": "11808233",
        "supervisor": [
            166
        ],
        "research_areas": [
            "Perception"
        ],
        "keywords": [],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "image",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1159,
                "image_height": 814,
                "name": "thesis-kronsteiner-image.jpeg",
                "type": "image/jpeg",
                "size": 400170,
                "path": "Publication:thesis-kronsteiner",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/thesis-kronsteiner/thesis-kronsteiner-image.jpeg",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/thesis-kronsteiner/thesis-kronsteiner-image:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "thesis",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "thesis-kronsteiner-thesis.pdf",
                "type": "application/pdf",
                "size": 7513311,
                "path": "Publication:thesis-kronsteiner",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/thesis-kronsteiner/thesis-kronsteiner-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/thesis-kronsteiner/thesis-kronsteiner-thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/thesis-kronsteiner/",
        "__class": "Publication"
    },
    {
        "id": "pahr-2024-ieo",
        "type_id": "journalpaper",
        "tu_id": null,
        "repositum_id": "20.500.12708/199161",
        "title": "Investigating the Effect of Operation Mode and Manifestation on Physicalizations of Dynamic Processes",
        "date": "2024-06",
        "abstract": "We conducted a study to systematically investigate the communication of complex dynamic processes along a two-dimensional design space, where the axes represent a representation's manifestation (physical or virtual) and operation (manual or automatic). We exemplify the design space on a model embodying cardiovascular pathologies, represented by a mechanism where a liquid is pumped into a draining vessel, with complications illustrated through modifications to the model. The results of a mixed-methods lab study with 28 participants show that both physical manifestation and manual operation have a strong positive impact on the audience's engagement. The study does not show a measurable knowledge increase with respect to cardiovascular pathologies using manually operated physical representations. However, subjectively, participants report a better understanding of the process—mainly through non-visual cues like haptics, but also auditory cues. The study also indicates an increased task load when interacting with the process, which, however, seems to play a minor role for the participants. Overall, the study shows a clear potential of physicalization for the communication of complex dynamic processes, which only fully unfold if observers have to chance to interact with the process.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 2634,
            "image_height": 1232,
            "name": "pahr-2024-ieo-teaser.png",
            "type": "image/png",
            "size": 172079,
            "path": "Publication:pahr-2024-ieo",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/pahr-2024-ieo/pahr-2024-ieo-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/pahr-2024-ieo/pahr-2024-ieo-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": "projects,date_from,date_to,event,lecturer,pages_from,pages_to",
        "repositum_presentation_id": null,
        "authors": [
            1813,
            1850,
            1464,
            1110,
            1410
        ],
        "articleno": "e15106",
        "date_from": "2024-06-27",
        "date_to": "2024-06-31",
        "doi": "10.1111/cgf.15106",
        "event": "EUROVIS",
        "issn": "1467-8659",
        "journal": "Computer Graphics Forum",
        "lecturer": [
            1813
        ],
        "number": "3",
        "pages": "12",
        "pages_from": "1",
        "pages_to": "12",
        "publisher": "WILEY",
        "volume": "43",
        "research_areas": [
            "InfoVis",
            "MedVis",
            "Perception"
        ],
        "keywords": [
            "Data Physicalization",
            "Study",
            "Cardiovascular Diseases",
            "Edutainment",
            "Human Computer Interaction (HCI)",
            "Mixed Methods"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 2634,
                "image_height": 1232,
                "name": "pahr-2024-ieo-teaser.png",
                "type": "image/png",
                "size": 172079,
                "path": "Publication:pahr-2024-ieo",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/pahr-2024-ieo/pahr-2024-ieo-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/pahr-2024-ieo/pahr-2024-ieo-teaser:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/pahr-2024-ieo/",
        "__class": "Publication"
    },
    {
        "id": "eschner-2023-ims",
        "type_id": "journalpaper",
        "tu_id": null,
        "repositum_id": "20.500.12708/187705",
        "title": "Illustrative Motion Smoothing for Attention Guidance in Dynamic Visualizations",
        "date": "2023-06",
        "abstract": "3D animations are an effective method to learn about complex dynamic phenomena, such as mesoscale biological processes. The animators’ goals are to convey a sense of the scene’s overall complexity while, at the same time, visually guiding the user through a story of subsequent events embedded in the chaotic environment. Animators use a variety of visual emphasis techniques to guide the observers’ attention through the story, such as highlighting, halos – or by manipulating motion parameters of the scene. In this paper, we investigate the effect of smoothing the motion of contextual scene elements to attract attention to focus elements of the story exhibiting high-frequency motion. We conducted a crowdsourced study with 108 participants observing short animations with two illustrative motion smoothing strategies: geometric smoothing through noise reduction of contextual motion trajectories and visual smoothing through motion blur of context items. We investigated the observers’ ability to follow the story as well as the effect of the techniques on speed perception in a molecular scene. Our results show that moderate motion blur significantly improves users’ ability to follow the story. Geometric motion smoothing is less effective but increases the visual appeal of the animation. However, both techniques also slow down the perceived speed of the animation. We discuss the implications of these results and derive design guidelines for animators of complex dynamic visualizations.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1400,
            "image_height": 1080,
            "name": "eschner-2023-ims-teaser.png",
            "type": "image/png",
            "size": 1527403,
            "path": "Publication:eschner-2023-ims",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2023/eschner-2023-ims/eschner-2023-ims-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2023/eschner-2023-ims/eschner-2023-ims-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": "event,lecturer,location,projects",
        "repositum_presentation_id": null,
        "authors": [
            1653,
            935,
            1110
        ],
        "doi": "10.1111/cgf.14836",
        "event": "EuroVis 2023",
        "issn": "1467-8659",
        "journal": "Computer Graphics Forum",
        "lecturer": [
            1653
        ],
        "location": "Leipzig, Germany",
        "number": "3",
        "open_access": "yes",
        "pages": "12",
        "pages_from": "361",
        "pages_to": "372",
        "publisher": "WILEY",
        "volume": "42",
        "research_areas": [
            "BioVis",
            "IllVis",
            "Perception"
        ],
        "keywords": [
            "Empirical studies in visualization",
            "Animation"
        ],
        "weblinks": [
            {
                "href": "https://onlinelibrary.wiley.com/doi/10.1111/cgf.14836",
                "caption": "Computer Graphics Forum",
                "description": null,
                "main_file": 0
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "eschner-2023-ims-paper.pdf",
                "type": "application/pdf",
                "size": 9072344,
                "path": "Publication:eschner-2023-ims",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2023/eschner-2023-ims/eschner-2023-ims-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2023/eschner-2023-ims/eschner-2023-ims-paper:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1400,
                "image_height": 1080,
                "name": "eschner-2023-ims-teaser.png",
                "type": "image/png",
                "size": 1527403,
                "path": "Publication:eschner-2023-ims",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2023/eschner-2023-ims/eschner-2023-ims-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2023/eschner-2023-ims/eschner-2023-ims-teaser:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "video",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "eschner-2023-ims-video.mp4",
                "type": "video/mp4",
                "size": 16270968,
                "path": "Publication:eschner-2023-ims",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2023/eschner-2023-ims/eschner-2023-ims-video.mp4",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2023/eschner-2023-ims/eschner-2023-ims-video:thumb{{size}}.png",
                "video_mp4": "https://www.cg.tuwien.ac.at/research/publications/2023/eschner-2023-ims/eschner-2023-ims-video:video.mp4"
            }
        ],
        "projects_workgroups": [
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2023/eschner-2023-ims/",
        "__class": "Publication"
    },
    {
        "id": "eschner-blur-2022",
        "type_id": "studentproject",
        "tu_id": null,
        "repositum_id": null,
        "title": "Generating Molecular Motion Blur Videos for a User Study",
        "date": "2022-10",
        "abstract": null,
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1325,
            "image_height": 789,
            "name": "eschner-blur-2022-teaser.png",
            "type": "image/png",
            "size": 472805,
            "path": "Publication:eschner-blur-2022",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2022/eschner-blur-2022/eschner-blur-2022-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2022/eschner-blur-2022/eschner-blur-2022-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1653
        ],
        "date_end": "2022-10",
        "date_start": "2021-11",
        "matrikelnr": "01633402",
        "supervisor": [
            1110,
            935
        ],
        "research_areas": [
            "BioVis",
            "Perception"
        ],
        "keywords": [],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "project report",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "eschner-blur-2022-project report.pdf",
                "type": "application/pdf",
                "size": 3469035,
                "path": "Publication:eschner-blur-2022",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2022/eschner-blur-2022/eschner-blur-2022-project report.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2022/eschner-blur-2022/eschner-blur-2022-project report:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1325,
                "image_height": 789,
                "name": "eschner-blur-2022-teaser.png",
                "type": "image/png",
                "size": 472805,
                "path": "Publication:eschner-blur-2022",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2022/eschner-blur-2022/eschner-blur-2022-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2022/eschner-blur-2022/eschner-blur-2022-teaser:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2022/eschner-blur-2022/",
        "__class": "Publication"
    },
    {
        "id": "kristmann-2022-occ",
        "type_id": "bachelorthesis",
        "tu_id": null,
        "repositum_id": null,
        "title": "Occluder Frequency Analysis for Evaluating the Level of Visibility of Partly Occluded Objects",
        "date": "2022-07",
        "abstract": "To increase rendering efficiency of large and complex scenes, occlusion culling algorithms detect objects which are completely hidden by others and therefore do not need to be rendered. However, these methods often follow an all-or-nothing principle, either culling the geometry entirely or drawing it at full detail. This approach disregards an important subcategory of the visibility problem: detecting objects that are hardly visible because they are partly occluded and which can therefore be rendered at a lower level of detail without generating noticeable artifacts. In this thesis we assess the level of visibility of such objects by computing a hierarchical occlusion map and analysing its structure based on the frequencies of the occluders. This analysis results in a parameter that controls the level of detail (LOD) in which the geometry is rendered. The algorithm performs well even in scenes with sparse occlusion, surpassing the standard hierarchical occlusion map algorithm, with still a lot of potential for even further improvements.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 1368,
            "image_height": 1280,
            "name": "kristmann-2022-occ-teaser.png",
            "type": "image/png",
            "size": 252502,
            "path": "Publication:kristmann-2022-occ",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2022/kristmann-2022-occ/kristmann-2022-occ-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2022/kristmann-2022-occ/kristmann-2022-occ-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1935
        ],
        "date_end": "2022-07-15",
        "date_start": "2022-01-01",
        "matrikelnr": "01518693",
        "supervisor": [
            193,
            1650
        ],
        "research_areas": [
            "Perception",
            "Rendering"
        ],
        "keywords": [
            "rendering",
            "occlusion culling",
            "real-time"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 1368,
                "image_height": 1280,
                "name": "kristmann-2022-occ-teaser.png",
                "type": "image/png",
                "size": 252502,
                "path": "Publication:kristmann-2022-occ",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2022/kristmann-2022-occ/kristmann-2022-occ-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2022/kristmann-2022-occ/kristmann-2022-occ-teaser:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "thesis",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "kristmann-2022-occ-thesis.pdf",
                "type": "application/pdf",
                "size": 4728973,
                "path": "Publication:kristmann-2022-occ",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2022/kristmann-2022-occ/kristmann-2022-occ-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2022/kristmann-2022-occ/kristmann-2022-occ-thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "3DSpatialization"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2022/kristmann-2022-occ/",
        "__class": "Publication"
    },
    {
        "id": "cardoso-2022-rtpercept",
        "type_id": "journalpaper",
        "tu_id": null,
        "repositum_id": "20.500.12708/142206",
        "title": "Training and Predicting Visual Error for Real-Time Applications",
        "date": "2022-05",
        "abstract": "Visual error metrics play a fundamental role in the quantification of perceived image similarity. Most recently, use cases for them in real-time applications have emerged, such as content-adaptive shading and shading reuse to increase performance and improve efficiency. A wide range of different metrics has been established, with the most sophisticated being capable of capturing the perceptual characteristics of the human visual system. However, their complexity, computational expense, and reliance on reference images to compare against prevent their generalized use in real-time, restricting such applications to using only the simplest available metrics.\n\nIn this work, we explore the abilities of convolutional neural networks to predict a variety of visual metrics without requiring either reference or rendered images. Specifically, we train and deploy a neural network to estimate the visual error resulting from reusing shading or using reduced shading rates. The resulting models account for 70%--90% of the variance while achieving up to an order of magnitude faster computation times. Our solution combines image-space information that is readily available in most state-of-the-art deferred shading pipelines with reprojection from previous frames to enable an adequate estimate of visual errors, even in previously unseen regions. We describe a suitable convolutional network architecture and considerations for data preparation for training. We demonstrate the capability of our network to predict complex error metrics at interactive rates in a real-time application that implements content-adaptive shading in a deferred pipeline. Depending on the portion of unseen image regions, our approach can achieve up to 2x performance compared to state-of-the-art methods.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1920,
            "image_height": 1088,
            "name": "cardoso-2022-rtpercept-teaser.png",
            "type": "image/png",
            "size": 2493649,
            "path": "Publication:cardoso-2022-rtpercept",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2022/cardoso-2022-rtpercept/cardoso-2022-rtpercept-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2022/cardoso-2022-rtpercept/cardoso-2022-rtpercept-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1639,
            1650,
            868,
            1921,
            193
        ],
        "cfp": {
            "name": "cfp.pdf",
            "type": "application/pdf",
            "error": "0",
            "size": "1227280",
            "orig_name": "cfp.pdf",
            "ext": "pdf"
        },
        "date_from": "2022-05-03",
        "date_to": "2022-05-05",
        "doi": "10.1145/3522625",
        "event": "ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games",
        "issn": "2577-6193",
        "journal": "Proceedings of the ACM on Computer Graphics and Interactive Techniques",
        "lecturer": [
            1639
        ],
        "location": "online",
        "number": "1",
        "open_access": "yes",
        "pages": "17",
        "pages_from": "1",
        "pages_to": "17",
        "publisher": "Association for Computing Machinery",
        "volume": "5",
        "research_areas": [
            "Perception",
            "Rendering"
        ],
        "keywords": [
            "perceptual error",
            "variable rate shading",
            "real-time"
        ],
        "weblinks": [
            {
                "href": "https://jaliborc.github.io/rt-percept/",
                "caption": "Paper Website",
                "description": null,
                "main_file": 1
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "cardoso-2022-rtpercept-paper.pdf",
                "type": "application/pdf",
                "size": 54709850,
                "path": "Publication:cardoso-2022-rtpercept",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2022/cardoso-2022-rtpercept/cardoso-2022-rtpercept-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2022/cardoso-2022-rtpercept/cardoso-2022-rtpercept-paper:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1920,
                "image_height": 1088,
                "name": "cardoso-2022-rtpercept-teaser.png",
                "type": "image/png",
                "size": 2493649,
                "path": "Publication:cardoso-2022-rtpercept",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2022/cardoso-2022-rtpercept/cardoso-2022-rtpercept-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2022/cardoso-2022-rtpercept/cardoso-2022-rtpercept-teaser:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "EVOCATION"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2022/cardoso-2022-rtpercept/",
        "__class": "Publication"
    },
    {
        "id": "stappen_SteFAS",
        "type_id": "inproceedings",
        "tu_id": 300417,
        "repositum_id": "20.500.12708/58631",
        "title": "Temporally Stable Content-Adaptive and Spatio-Temporal Shading Rate Assignment for Real-Time Applications",
        "date": "2021-10",
        "abstract": "We propose two novel methods to improve the efficiency and quality of real-time rendering applications: Texel differential-based content-adaptive shading (TDCAS) and spatio-temporally filtered adaptive shading (STeFAS). Utilizing Variable Rate Shading (VRS)-a hardware feature introduced with NVIDIA's Turing micro-architecture-and properties derived during rendering or Temporal Anti-Aliasing (TAA), our techniques adapt the resolution to improve the performance and quality of real-time applications. VRS enables different shading resolution for different regions of the screen during a single render pass. In contrast to other techniques, TDCAS and STeFAS have very little overhead for computing the shading rate. STeFAS enables up to 4x higher rendering resolutions for similar frame rates, or a performance increase of 4× at the same resolution.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 1227,
            "image_height": 799,
            "name": "stappen_SteFAS-teaser.png",
            "type": "image/png",
            "size": 126643,
            "path": "Publication:stappen_SteFAS",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2021/stappen_SteFAS/stappen_SteFAS-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2021/stappen_SteFAS/stappen_SteFAS-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1406,
            848,
            1650,
            193
        ],
        "booktitle": "Pacific Graphics Short Papers, Posters, and Work-in-Progress Papers",
        "cfp": {
            "name": "cfp.pdf",
            "type": "application/pdf",
            "error": "0",
            "size": "982263",
            "orig_name": "cfp.pdf",
            "ext": "pdf"
        },
        "date_from": "2021-10-18",
        "date_to": "2021-10-21",
        "doi": "10.2312/pg.20211391",
        "editor": "Lee, Sung-Hee and Zollmann, Stefanie and Okabe, Makoto and Wünsche, Burkhard",
        "event": "Pacific Graphics 2021",
        "isbn": "978-3-03868-162-5",
        "lecturer": [
            1406
        ],
        "location": "online",
        "organization": "The Eurographics Association",
        "pages": "2",
        "pages_from": "65",
        "pages_to": "66",
        "publisher": "Eurographics Association",
        "research_areas": [
            "Perception",
            "Rendering"
        ],
        "keywords": [
            "variable rate shading",
            "temporal antialiasing"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "stappen_SteFAS-paper.pdf",
                "type": "application/pdf",
                "size": 173697,
                "path": "Publication:stappen_SteFAS",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2021/stappen_SteFAS/stappen_SteFAS-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2021/stappen_SteFAS/stappen_SteFAS-paper:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 1227,
                "image_height": 799,
                "name": "stappen_SteFAS-teaser.png",
                "type": "image/png",
                "size": 126643,
                "path": "Publication:stappen_SteFAS",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2021/stappen_SteFAS/stappen_SteFAS-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2021/stappen_SteFAS/stappen_SteFAS-teaser:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "3DSpatialization"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2021/stappen_SteFAS/",
        "__class": "Publication"
    },
    {
        "id": "grossmann-2021-layout",
        "type_id": "inproceedings",
        "tu_id": 300287,
        "repositum_id": "20.500.12708/58620",
        "title": "Does the Layout Really Matter? A Study on Visual Model Accuracy Estimation",
        "date": "2021-10",
        "abstract": "In visual interactive labeling, users iteratively assign labels to data items until the machine model reaches an acceptable accuracy. A crucial step of this process is to inspect the model's accuracy and decide whether it is necessary to label additional elements. In scenarios with no or very little labeled data, visual inspection of the predictions is required. Similarity-preserving scatterplots created through a dimensionality reduction algorithm are a common visualization that is used in these cases. Previous studies investigated the effects of layout and image complexity on tasks like labeling. However, model evaluation has not been studied systematically. We present the results of an experiment studying the influence of image complexity and visual grouping of images on model accuracy estimation. We found that users outperform traditional automated approaches when estimating a model's accuracy. Furthermore, while the complexity of images impacts the overall performance, the layout of the items in the plot has little to no effect on estimations.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": "In both scatterplots shown here, the percentage of images with correctly predicted class labels (visualized as border color) is over 90%. We found that users can estimate these accuracies fairly well. Image complexity impacts overall performance, but the layout has very little effect on users’ estimations. ",
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 3287,
            "image_height": 1817,
            "name": "grossmann-2021-layout-teaser.png",
            "type": "image/png",
            "size": 879037,
            "path": "Publication:grossmann-2021-layout",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2021/grossmann-2021-layout/grossmann-2021-layout-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2021/grossmann-2021-layout/grossmann-2021-layout-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1366,
            1851,
            1690,
            1110
        ],
        "booktitle": "IEEE Visualization Conference (VIS)",
        "cfp": {
            "name": "Short Paper Call for Participation.pdf",
            "type": "application/pdf",
            "error": "0",
            "size": "180486",
            "orig_name": "Short Paper Call for Participation.pdf",
            "ext": "pdf"
        },
        "doi": "10.1109/VIS49827.2021.9623326",
        "event": "IEEE Visualization Conference (VIS)",
        "lecturer": [
            1110
        ],
        "open_access": "yes",
        "pages": "5",
        "pages_from": "61",
        "pages_to": "65",
        "publisher": "IEEE Computer Society Press",
        "research_areas": [
            "InfoVis",
            "Perception"
        ],
        "keywords": [],
        "weblinks": [
            {
                "href": "https://arxiv.org/pdf/2110.07188.pdf",
                "caption": "arxiv",
                "description": "Link to arxiv version of the paper",
                "main_file": 0
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "grossmann-2021-layout-paper.pdf",
                "type": "application/pdf",
                "size": 1951149,
                "path": "Publication:grossmann-2021-layout",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2021/grossmann-2021-layout/grossmann-2021-layout-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2021/grossmann-2021-layout/grossmann-2021-layout-paper:thumb{{size}}.png"
            },
            {
                "description": "Pre-recorded presentation for VIS 2021",
                "filetitle": "presentation",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "grossmann-2021-layout-presentation.mp4",
                "type": "video/mp4",
                "size": 28816734,
                "path": "Publication:grossmann-2021-layout",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2021/grossmann-2021-layout/grossmann-2021-layout-presentation.mp4",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2021/grossmann-2021-layout/grossmann-2021-layout-presentation:thumb{{size}}.png",
                "video_mp4": "https://www.cg.tuwien.ac.at/research/publications/2021/grossmann-2021-layout/grossmann-2021-layout-presentation:video.mp4"
            },
            {
                "description": "Fast-forward preview video",
                "filetitle": "preview",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "grossmann-2021-layout-preview.mp4",
                "type": "video/mp4",
                "size": 5736268,
                "path": "Publication:grossmann-2021-layout",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2021/grossmann-2021-layout/grossmann-2021-layout-preview.mp4",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2021/grossmann-2021-layout/grossmann-2021-layout-preview:thumb{{size}}.png",
                "video_mp4": "https://www.cg.tuwien.ac.at/research/publications/2021/grossmann-2021-layout/grossmann-2021-layout-preview:video.mp4"
            },
            {
                "description": "Supplementary document showing study conditions and interface",
                "filetitle": "supplement",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "grossmann-2021-layout-supplement.pdf",
                "type": "application/pdf",
                "size": 44940660,
                "path": "Publication:grossmann-2021-layout",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2021/grossmann-2021-layout/grossmann-2021-layout-supplement.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2021/grossmann-2021-layout/grossmann-2021-layout-supplement:thumb{{size}}.png"
            },
            {
                "description": "In both scatterplots shown here, the percentage of images with correctly predicted class labels (visualized as border color) is over 90%. We found that users can estimate these accuracies fairly well. Image complexity impacts overall performance, but the layout has very little effect on users’ estimations. ",
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 3287,
                "image_height": 1817,
                "name": "grossmann-2021-layout-teaser.png",
                "type": "image/png",
                "size": 879037,
                "path": "Publication:grossmann-2021-layout",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2021/grossmann-2021-layout/grossmann-2021-layout-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2021/grossmann-2021-layout/grossmann-2021-layout-teaser:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2021/grossmann-2021-layout/",
        "__class": "Publication"
    },
    {
        "id": "panfili-2021-myop",
        "type_id": "otherreviewed",
        "tu_id": null,
        "repositum_id": "20.500.12708/58726",
        "title": "Myopia in Head-Worn Virtual Reality",
        "date": "2021-03-27",
        "abstract": "In this work, we investigate the influence of myopia on the perceived visual acuity (VA) in head-worn virtual reality (VR). Factors such as display resolution or vision capabilities of users influence the VA in VR. We simulated eyesight tests in VR and on a desktop screen and conducted a user study comparing VA measurements of participants with normal sight and participants with myopia. Surprisingly, our results suggest that people with severe myopia can see better in VR than in the real world, while the VA of people with normal or corrected sight or mild myopia is reduced in VR.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1658,
            193,
            1030
        ],
        "booktitle": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
        "date_from": "2021-03-27",
        "date_to": "2021-04-01",
        "doi": "10.1109/VRW52623.2021.00197",
        "isbn": "978-1-6654-1166-0",
        "location": "online",
        "open_access": "no",
        "pages": "2",
        "pages_from": "629",
        "pages_to": "630",
        "publisher": "IEEE Computer Society Press",
        "research_areas": [
            "Perception",
            "Rendering"
        ],
        "keywords": [
            "visual impairments"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "abstract",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "panfili-2021-myop-abstract.pdf",
                "type": "application/pdf",
                "size": 1495051,
                "path": "Publication:panfili-2021-myop",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2021/panfili-2021-myop/panfili-2021-myop-abstract.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2021/panfili-2021-myop/panfili-2021-myop-abstract:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "rend"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2021/panfili-2021-myop/",
        "__class": "Publication"
    },
    {
        "id": "luidolt-2020-lightperceptionVR",
        "type_id": "journalpaper",
        "tu_id": 291224,
        "repositum_id": "20.500.12708/140951",
        "title": "Gaze-Dependent Simulation of Light Perception in Virtual Reality",
        "date": "2020-12",
        "abstract": "The perception of light is inherently different inside a virtual reality (VR) or augmented reality (AR) simulation when compared to the real world. Conventional head-worn displays (HWDs) are not able to display the same high dynamic range of brightness and color as the human eye can perceive in the real world. To mimic the perception of real-world scenes in virtual scenes, it is crucial to reproduce the effects of incident light on the human visual system. In order to advance virtual simulations towards perceptual realism, we present an eye-tracked VR/AR simulation comprising effects for gaze-dependent temporal eye adaption, perceptual glare, visual acuity reduction, and scotopic color vision. Our simulation is based on medical expert knowledge and medical studies of the healthy human eye. We conducted the first user study comparing the perception of light in a real-world low-light scene to a VR simulation. Our results show that the proposed combination of simulated visual effects is well received by users and also indicate that an individual adaptation is necessary, because perception of light is highly subjective.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1478,
            "image_height": 534,
            "name": "luidolt-2020-lightperceptionVR-image.jpg",
            "type": "image/jpeg",
            "size": 1390298,
            "path": "Publication:luidolt-2020-lightperceptionVR",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2020/luidolt-2020-lightperceptionVR/luidolt-2020-lightperceptionVR-image.jpg",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2020/luidolt-2020-lightperceptionVR/luidolt-2020-lightperceptionVR-image:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1577,
            193,
            1030
        ],
        "cfp": {
            "name": "Screenshot_2020-10-30 Call for Papers – ISMAR 2020 – International Symposium on Mixed and Augmented Reality.png",
            "type": "image/png",
            "error": "0",
            "size": "219516",
            "orig_name": "Screenshot_2020-10-30 Call for Papers – ISMAR 2020 – International Symposium on Mixed and Augmented Reality.png",
            "ext": "png"
        },
        "date_from": "2020-11-09",
        "date_to": "2020-11-13",
        "doi": "10.1109/TVCG.2020.3023604",
        "event": "ISMAR 2020​",
        "first_published": "2020-09-17",
        "issn": "1077-2626",
        "journal": "IEEE Transactions on Visualization and Computer Graphics",
        "lecturer": [
            1577
        ],
        "location": "online",
        "pages_from": "3557",
        "pages_to": "3567",
        "volume": "Volume 26, Issue 12",
        "research_areas": [
            "Perception",
            "Rendering",
            "VR"
        ],
        "keywords": [
            "perception",
            "virtual reality",
            "user studies"
        ],
        "weblinks": [
            {
                "href": "https://youtu.be/cY6z2pD7dWc",
                "caption": "Conference Talk",
                "description": null,
                "main_file": 0
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "additional-material",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "luidolt-2020-lightperceptionVR-additional-material.pdf",
                "type": "application/pdf",
                "size": 37540896,
                "path": "Publication:luidolt-2020-lightperceptionVR",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2020/luidolt-2020-lightperceptionVR/luidolt-2020-lightperceptionVR-additional-material.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2020/luidolt-2020-lightperceptionVR/luidolt-2020-lightperceptionVR-additional-material:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "image",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1478,
                "image_height": 534,
                "name": "luidolt-2020-lightperceptionVR-image.jpg",
                "type": "image/jpeg",
                "size": 1390298,
                "path": "Publication:luidolt-2020-lightperceptionVR",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2020/luidolt-2020-lightperceptionVR/luidolt-2020-lightperceptionVR-image.jpg",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2020/luidolt-2020-lightperceptionVR/luidolt-2020-lightperceptionVR-image:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "luidolt-2020-lightperceptionVR-paper.pdf",
                "type": "application/pdf",
                "size": 31511229,
                "path": "Publication:luidolt-2020-lightperceptionVR",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2020/luidolt-2020-lightperceptionVR/luidolt-2020-lightperceptionVR-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2020/luidolt-2020-lightperceptionVR/luidolt-2020-lightperceptionVR-paper:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "slides",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "luidolt-2020-lightperceptionVR-slides.pdf",
                "type": "application/pdf",
                "size": 2661389,
                "path": "Publication:luidolt-2020-lightperceptionVR",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2020/luidolt-2020-lightperceptionVR/luidolt-2020-lightperceptionVR-slides.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2020/luidolt-2020-lightperceptionVR/luidolt-2020-lightperceptionVR-slides:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "video",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "luidolt-2020-lightperceptionVR-video.mp4",
                "type": "video/mp4",
                "size": 30909955,
                "path": "Publication:luidolt-2020-lightperceptionVR",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2020/luidolt-2020-lightperceptionVR/luidolt-2020-lightperceptionVR-video.mp4",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2020/luidolt-2020-lightperceptionVR/luidolt-2020-lightperceptionVR-video:thumb{{size}}.png",
                "video_mp4": "https://www.cg.tuwien.ac.at/research/publications/2020/luidolt-2020-lightperceptionVR/luidolt-2020-lightperceptionVR-video:video.mp4"
            }
        ],
        "projects_workgroups": [
            "rend"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2020/luidolt-2020-lightperceptionVR/",
        "__class": "Publication"
    },
    {
        "id": "schindler_2020vis",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": "20.500.12708/58250",
        "title": "The Anatomical Edutainer",
        "date": "2020-10",
        "abstract": "Physical visualizations (i.e., data representations by means of physical objects) have been used for many centuries in medical and anatomical education. Recently, 3D printing techniques started also to emerge. Still, other medical physicalizations that rely on affordable and easy-to-find materials are limited, while smart strategies that take advantage of the optical properties of our physical world have not been thoroughly investigated. We propose the Anatomical Edutainer, a workflow to guide the easy, accessible, and affordable generation of physicalizations for tangible, interactive anatomical edutainment. The Anatomical Edutainer supports 2D printable and 3D foldable physicalizations that change their visual properties (i.e., hues of the visible spectrum) under colored lenses or colored lights, to reveal distinct anatomical structures through user interaction.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": true,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 256,
            "image_height": 192,
            "name": "schindler_2020vis-image.png",
            "type": "image/png",
            "size": 75476,
            "path": "Publication:schindler_2020vis",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2020/schindler_2020vis/schindler_2020vis-image.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2020/schindler_2020vis/schindler_2020vis-image:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1760,
            1464,
            1410
        ],
        "booktitle": "IEEE Vis Short Papers 2020",
        "cfp": {
            "name": "Short Paper Call for Participation.pdf",
            "type": "application/pdf",
            "error": "0",
            "size": "132515",
            "orig_name": "Short Paper Call for Participation.pdf",
            "ext": "pdf"
        },
        "event": "IEEE Vis 2020",
        "lecturer": [
            1410
        ],
        "pages_from": "1",
        "pages_to": "5",
        "research_areas": [
            "Fabrication",
            "IllVis",
            "MedVis",
            "Perception"
        ],
        "keywords": [
            "Data Physicalization",
            "Medical Visualization",
            "Anatomical Education"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "image",
                "main_file": true,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 256,
                "image_height": 192,
                "name": "schindler_2020vis-image.png",
                "type": "image/png",
                "size": 75476,
                "path": "Publication:schindler_2020vis",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2020/schindler_2020vis/schindler_2020vis-image.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2020/schindler_2020vis/schindler_2020vis-image:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "paper preprint",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "preview_image_width": 416,
                "preview_image_height": 421,
                "name": "schindler_2020vis-paper preprint.pdf",
                "type": "application/pdf",
                "size": 26317989,
                "path": "Publication:schindler_2020vis",
                "preview_name": "schindler_2020vis-paper preprint:preview.JPG",
                "preview_type": "image/jpeg",
                "preview_size": 43449,
                "url": "https://www.cg.tuwien.ac.at/research/publications/2020/schindler_2020vis/schindler_2020vis-paper preprint.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2020/schindler_2020vis/schindler_2020vis-paper preprint:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2020/schindler_2020vis/",
        "__class": "Publication"
    },
    {
        "id": "kroesl-2020-XREye",
        "type_id": "otherreviewed",
        "tu_id": null,
        "repositum_id": null,
        "title": "XREye: Simulating Visual Impairments in Eye-Tracked XR ",
        "date": "2020-03",
        "abstract": "Many people suffer from visual impairments, which can be difficult for patients to describe and others to visualize. To aid in understanding what people with visual impairments experience, we demonstrate a set of medically informed simulations in eye-tracked XR of several common conditions that affect visual perception: refractive errors (myopia, hyperopia, and presbyopia), cornea disease, and age-related macular degeneration (wet and dry).",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": "live demo in mozilla social hubs room",
            "filetitle": "image",
            "main_file": true,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 561,
            "image_height": 414,
            "name": "kroesl-2020-XREye-image.png",
            "type": "image/png",
            "size": 327573,
            "path": "Publication:kroesl-2020-XREye",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2020/kroesl-2020-XREye/kroesl-2020-XREye-image.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2020/kroesl-2020-XREye/kroesl-2020-XREye-image:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1030,
            1633,
            1636,
            1635,
            1634,
            193
        ],
        "booktitle": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
        "location": "(Atlanta) online",
        "open_access": "yes",
        "publisher": "IEEE",
        "research_areas": [
            "Perception",
            "Rendering",
            "VR"
        ],
        "keywords": [],
        "weblinks": [],
        "files": [
            {
                "description": "extended abstract of the research demo",
                "filetitle": "extended abstract",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "kroesl-2020-XREye-extended abstract.pdf",
                "type": "application/pdf",
                "size": 121548,
                "path": "Publication:kroesl-2020-XREye",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2020/kroesl-2020-XREye/kroesl-2020-XREye-extended abstract.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2020/kroesl-2020-XREye/kroesl-2020-XREye-extended abstract:thumb{{size}}.png"
            },
            {
                "description": "live demo in mozilla social hubs room",
                "filetitle": "image",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 561,
                "image_height": 414,
                "name": "kroesl-2020-XREye-image.png",
                "type": "image/png",
                "size": 327573,
                "path": "Publication:kroesl-2020-XREye",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2020/kroesl-2020-XREye/kroesl-2020-XREye-image.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2020/kroesl-2020-XREye/kroesl-2020-XREye-image:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "poster",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "kroesl-2020-XREye-poster.pdf",
                "type": "application/pdf",
                "size": 3057039,
                "path": "Publication:kroesl-2020-XREye",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2020/kroesl-2020-XREye/kroesl-2020-XREye-poster.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2020/kroesl-2020-XREye/kroesl-2020-XREye-poster:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "video",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "kroesl-2020-XREye-video.mp4",
                "type": "video/mp4",
                "size": 8756217,
                "path": "Publication:kroesl-2020-XREye",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2020/kroesl-2020-XREye/kroesl-2020-XREye-video.mp4",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2020/kroesl-2020-XREye/kroesl-2020-XREye-video:thumb{{size}}.png",
                "video_mp4": "https://www.cg.tuwien.ac.at/research/publications/2020/kroesl-2020-XREye/kroesl-2020-XREye-video:video.mp4"
            }
        ],
        "projects_workgroups": [
            "rend"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2020/kroesl-2020-XREye/",
        "__class": "Publication"
    },
    {
        "id": "Luidolt-2020-DA",
        "type_id": "masterthesis",
        "tu_id": null,
        "repositum_id": "20.500.12708/1203",
        "title": "Perception of Light in Virtual Reality",
        "date": "2020-02",
        "abstract": "The perception of light and light incidence in the human eye is substantially different in real-world scenarios and virtual reality (VR) simulations. Standard low dynamic range displays, as used in common VR headsets, are not able to replicate the same light intensities we see in reality. Therefore, light phenomenons, such as temporal eye adaptation, perceptual glare, visual acuity reduction and scotopic color vision need to be simulated to generate realistic images. Even though, a physically based simulation of these effects could increase the perceived reality of VR applications, this topic has not been thoroughly researched yet. \nWe propose a post-processing workflow for VR and augmented reality (AR), using eye tracking, that is based on medical studies of the healthy human eye and is able to run in real time, to simulate light effects as close to reality as possible. We improve an existing temporal eye adaptation algorithm to be view-dependent. We adapt a medically based glare simulation to run in VR and AR. Additionally, we add eye tracking to adjust the glare intensity according to the viewing direction and the glare appearance depending on the user’s pupil size. We propose a new function fit for the reduction of visual acuity in VR head mounted displays. Finally, we include scotopic color vision for more realistic rendering of low-light scenes. \nWe conducted a primarily qualitative pilot study, comparing a real-world low-light scene to our VR simulation through individual, perceptual evaluation. Most participants mentioned, that the simulation of temporal eye adaptation, visual acuity reduction and scotopic color vision was similar or the same as their own perception in the real world. However, further work is necessary to improve the appearance and movement of our proposed glare kernel. \nWe conclude, that our work has laid a ground base for further research regarding the simulation and individual adaptation to the perception of light in VR.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1920,
            "image_height": 1080,
            "name": "Luidolt-2020-DA-image.png",
            "type": "image/png",
            "size": 1606001,
            "path": "Publication:Luidolt-2020-DA",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2020/Luidolt-2020-DA/Luidolt-2020-DA-image.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2020/Luidolt-2020-DA/Luidolt-2020-DA-image:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1577
        ],
        "date_end": "2020-02-02",
        "date_start": "2019-04",
        "diploma_examina": "2020-02-10",
        "matrikelnr": "01427250 ",
        "supervisor": [
            1030,
            193
        ],
        "research_areas": [
            "Perception",
            "Rendering",
            "VR"
        ],
        "keywords": [
            "perception",
            "temporal eye adaptation",
            "glare",
            "virtual reality",
            "scotopic vision",
            "visual acuity reduction",
            "augmented reality"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "image",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1920,
                "image_height": 1080,
                "name": "Luidolt-2020-DA-image.png",
                "type": "image/png",
                "size": 1606001,
                "path": "Publication:Luidolt-2020-DA",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2020/Luidolt-2020-DA/Luidolt-2020-DA-image.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2020/Luidolt-2020-DA/Luidolt-2020-DA-image:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "poster",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "Luidolt-2020-DA-poster.pdf",
                "type": "application/pdf",
                "size": 6507701,
                "path": "Publication:Luidolt-2020-DA",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2020/Luidolt-2020-DA/Luidolt-2020-DA-poster.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2020/Luidolt-2020-DA/Luidolt-2020-DA-poster:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "thesis",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "Luidolt-2020-DA-thesis.pdf",
                "type": "application/pdf",
                "size": 15289421,
                "path": "Publication:Luidolt-2020-DA",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2020/Luidolt-2020-DA/Luidolt-2020-DA-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2020/Luidolt-2020-DA/Luidolt-2020-DA-thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "rend"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2020/Luidolt-2020-DA/",
        "__class": "Publication"
    },
    {
        "id": "panfili-2019-VAVR",
        "type_id": "bachelorthesis",
        "tu_id": null,
        "repositum_id": null,
        "title": "Effects of VR-Displays on Visual Acuity",
        "date": "2019-11",
        "abstract": "The perceived visual acuity (VA) of people in virtual reality (VR), using a head-mounted display (HMD), is not equal to their VA in the real world. The reason for this difference is the reduction of visual acuity in the virtual environment that is caused by various factors, such as the low resolution of the VR display. Based on those circumstances, the capacity of an individual to distinguish small details diminishes visibly. Previous studies regarding eyesight in VR have already verified how the best visual resolution in virtual environments is always lower than the natural vision and therefore this aspect could be seen as a mild vision impairment for the users of an HMD.\nThe goal of this project is to investigate how much the VA is reduced in VR and respectively whether the decrease of VA in VR is perceived similar by everyone or if visual impairments like Myopia, influence the visual perception.\nBased on a previous project, two different tests were implemented with the game engine Unreal Engine 4, a VR version for which an HTC VIVE headset was used, along with a desktop version. These tests were used to investigate the VA of the participant in a user study and the results have been compared to each other in order to find the extent to which visual impairments have an impact on VA.\n",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": true,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 1075,
            "image_height": 918,
            "name": "panfili-2019-VAVR-image.png",
            "type": "image/png",
            "size": 637497,
            "path": "Publication:panfili-2019-VAVR",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2019/panfili-2019-VAVR/panfili-2019-VAVR-image.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/panfili-2019-VAVR/panfili-2019-VAVR-image:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1658
        ],
        "date_end": "2019-11",
        "date_start": "2019-04",
        "matrikelnr": "01527648",
        "supervisor": [
            1030
        ],
        "research_areas": [
            "Perception",
            "Rendering",
            "VR"
        ],
        "keywords": [
            "virtual reality",
            "visual acuity"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "image",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 1075,
                "image_height": 918,
                "name": "panfili-2019-VAVR-image.png",
                "type": "image/png",
                "size": 637497,
                "path": "Publication:panfili-2019-VAVR",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2019/panfili-2019-VAVR/panfili-2019-VAVR-image.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/panfili-2019-VAVR/panfili-2019-VAVR-image:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "thesis",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "panfili-2019-VAVR-thesis.pdf",
                "type": "application/pdf",
                "size": 4765683,
                "path": "Publication:panfili-2019-VAVR",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2019/panfili-2019-VAVR/panfili-2019-VAVR-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/panfili-2019-VAVR/panfili-2019-VAVR-thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "rend"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2019/panfili-2019-VAVR/",
        "__class": "Publication"
    },
    {
        "id": "waldner-2019-rld",
        "type_id": "journalpaper",
        "tu_id": null,
        "repositum_id": null,
        "title": "A Comparison of Radial and Linear Charts for Visualizing Daily Patterns",
        "date": "2019-10",
        "abstract": "Radial charts are generally considered less effective than linear charts. Perhaps the only exception is in visualizing periodical time-dependent data, which is believed to be naturally supported by the radial layout. It has been demonstrated that the\ndrawbacks of radial charts outweigh the benefits of this natural mapping. Visualization of daily patterns, as a special case, has not been systematically evaluated using radial charts. In contrast to yearly or weekly recurrent trends, the analysis of daily patterns on a radial chart may benefit from our trained skill on reading radial clocks that are ubiquitous in our culture. In a crowd-sourced experiment with 92 non-expert users, we evaluated the accuracy, efficiency, and subjective ratings of radial and linear charts for visualizing daily traffic accident patterns. We systematically compared juxtaposed 12-hours variants and single 24-hours variants for both layouts in four low-level tasks and one high-level interpretation task. Our results show that over all tasks, the most elementary 24-hours linear bar chart is most accurate and efficient and is also preferred by the users. This provides strong evidence for the use of linear layouts – even for visualizing periodical daily patterns.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": "Daily patterns visualized in a 24-hours radial chart. ",
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 497,
            "image_height": 474,
            "name": "waldner-2019-rld-teaser.png",
            "type": "image/png",
            "size": 53565,
            "path": "Publication:waldner-2019-rld",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2019/waldner-2019-rld/waldner-2019-rld-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/waldner-2019-rld/waldner-2019-rld-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1110,
            1121,
            581,
            1204,
            1122,
            235
        ],
        "cfp": {
            "name": "IEEE VIS 2019 Call For Participation.pdf",
            "type": "application/pdf",
            "error": "0",
            "size": "545751",
            "orig_name": "IEEE VIS 2019 Call For Participation.pdf",
            "ext": "pdf"
        },
        "date_from": "2019-10-20",
        "date_to": "2019-10-25",
        "doi": "10.1109/TVCG.2019.2934784",
        "event": "IEEE VIS InfoVis",
        "journal": "IEEE Transactions on Visualization and Computer Graphics",
        "lecturer": [
            1110
        ],
        "location": "Vancouver, Canada",
        "pages_from": "1033",
        "pages_to": "1042",
        "volume": "26",
        "research_areas": [
            "InfoVis",
            "Perception"
        ],
        "keywords": [
            "radial charts",
            "time series data",
            "daily patterns",
            "crowd-sourced experiment"
        ],
        "weblinks": [
            {
                "href": "https://vimeo.com/371939694",
                "caption": "Presentation recording from IEEE VIS 2019 (VGTC Vimeo)",
                "description": null,
                "main_file": 0
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "waldner-2019-rld-paper.pdf",
                "type": "application/pdf",
                "size": 892625,
                "path": "Publication:waldner-2019-rld",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2019/waldner-2019-rld/waldner-2019-rld-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/waldner-2019-rld/waldner-2019-rld-paper:thumb{{size}}.png"
            },
            {
                "description": "Powerpoint slides of presentation at IEEE VIS 2019 (do not contain animations and videos). ",
                "filetitle": "slides ",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "waldner-2019-rld-slides .pdf",
                "type": "application/pdf",
                "size": 12853249,
                "path": "Publication:waldner-2019-rld",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2019/waldner-2019-rld/waldner-2019-rld-slides .pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/waldner-2019-rld/waldner-2019-rld-slides :thumb{{size}}.png"
            },
            {
                "description": "Supplemental information about the user study",
                "filetitle": "supplement",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "waldner-2019-rld-supplement.pdf",
                "type": "application/pdf",
                "size": 2511402,
                "path": "Publication:waldner-2019-rld",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2019/waldner-2019-rld/waldner-2019-rld-supplement.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/waldner-2019-rld/waldner-2019-rld-supplement:thumb{{size}}.png"
            },
            {
                "description": "Daily patterns visualized in a 24-hours radial chart. ",
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 497,
                "image_height": 474,
                "name": "waldner-2019-rld-teaser.png",
                "type": "image/png",
                "size": 53565,
                "path": "Publication:waldner-2019-rld",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2019/waldner-2019-rld/waldner-2019-rld-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/waldner-2019-rld/waldner-2019-rld-teaser:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "video preview",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "waldner-2019-rld-video preview.mp4",
                "type": "video/mp4",
                "size": 3082628,
                "path": "Publication:waldner-2019-rld",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2019/waldner-2019-rld/waldner-2019-rld-video preview.mp4",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/waldner-2019-rld/waldner-2019-rld-video preview:thumb{{size}}.png",
                "video_mp4": "https://www.cg.tuwien.ac.at/research/publications/2019/waldner-2019-rld/waldner-2019-rld-video preview:video.mp4"
            }
        ],
        "projects_workgroups": [
            "deskollage"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2019/waldner-2019-rld/",
        "__class": "Publication"
    },
    {
        "id": "koch-2019-PR",
        "type_id": "studentproject",
        "tu_id": null,
        "repositum_id": null,
        "title": "Simulation of Diabetic Macular Edema in Virtual Reality",
        "date": "2019-08",
        "abstract": "Simulation of diabetic macular edema (DME) is implemented in a virtual reality simulation using Unreal Engine 4. Common symptoms of DME are blurry vision, loss of contrast, floaters and distorted vision. We use different computer graphics techniques to create effects which resemble such symptoms.\nAn eye tracker from Pupil Labs is used in order to make effects gaze dependent.\nThe implementation of these effects is discussed and adjustable parameters\nof the effects are explained.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": true,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 1920,
            "image_height": 1026,
            "name": "koch-2019-PR-image.jpg",
            "type": "image/jpeg",
            "size": 206235,
            "path": "Publication:koch-2019-PR",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2019/koch-2019-PR/koch-2019-PR-image.jpg",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/koch-2019-PR/koch-2019-PR-image:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1604
        ],
        "date_end": "2019-08",
        "date_start": "2019-04",
        "matrikelnr": "01526232 ",
        "note": "1",
        "supervisor": [
            1030
        ],
        "research_areas": [
            "Perception",
            "Rendering"
        ],
        "keywords": [],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "image",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 1920,
                "image_height": 1026,
                "name": "koch-2019-PR-image.jpg",
                "type": "image/jpeg",
                "size": 206235,
                "path": "Publication:koch-2019-PR",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2019/koch-2019-PR/koch-2019-PR-image.jpg",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/koch-2019-PR/koch-2019-PR-image:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "report",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "koch-2019-PR-report.pdf",
                "type": "application/pdf",
                "size": 3704736,
                "path": "Publication:koch-2019-PR",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2019/koch-2019-PR/koch-2019-PR-report.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/koch-2019-PR/koch-2019-PR-report:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "rend"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2019/koch-2019-PR/",
        "__class": "Publication"
    },
    {
        "id": "kroesl-2019-ThesisFF",
        "type_id": "otherreviewed",
        "tu_id": 282889,
        "repositum_id": null,
        "title": "Simulating Vision Impairments in VR and AR",
        "date": "2019-06-30",
        "abstract": "1.3 billion people worldwide are affected by vision impairments,\naccording to the World Health Organization. However, vision impairments\nare hardly ever taken into account when we design our\ncities, buildings, emergency signposting, or lighting systems. With\nthis research, we want to develop realistic, medically based simulations\nof eye diseases in VR and AR, which allow calibrating vision\nimpairments to the same level for different users. This allows us\nto conduct user studies with participants with normal sight and\ngraphically simulated vision impairments, to determine the effects\nof these impairments on perception, and to investigate lighting\nconcepts under impaired vision conditions. This thesis will, for the\nfirst time, provide methods for architects and designers to evaluate\ntheir designs for accessibility and to develop lighting systems that\ncan enhance the perception of people with vision impairments.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": true,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 720,
            "image_height": 412,
            "name": "kroesl-2019-ThesisFF-image.jpg",
            "type": "image/jpeg",
            "size": 188500,
            "path": "Publication:kroesl-2019-ThesisFF",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2019/kroesl-2019-ThesisFF/kroesl-2019-ThesisFF-image.jpg",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/kroesl-2019-ThesisFF/kroesl-2019-ThesisFF-image:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1030
        ],
        "booktitle": "ACM SIGGRAPH THESIS FAST FORWARD 2019",
        "research_areas": [
            "Perception",
            "Rendering"
        ],
        "keywords": [
            "vision impairments",
            "cataracts",
            "virtual reality",
            "augmented reality",
            "user study"
        ],
        "weblinks": [
            {
                "href": "https://s2019.siggraph.org/conference/programs-events/organization-events/thesis-papers-fast-forward/",
                "caption": null,
                "description": null,
                "main_file": 0
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "extended abstract",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "kroesl-2019-ThesisFF-extended abstract.pdf",
                "type": "application/pdf",
                "size": 260300,
                "path": "Publication:kroesl-2019-ThesisFF",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2019/kroesl-2019-ThesisFF/kroesl-2019-ThesisFF-extended abstract.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/kroesl-2019-ThesisFF/kroesl-2019-ThesisFF-extended abstract:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "image",
                "main_file": true,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 720,
                "image_height": 412,
                "name": "kroesl-2019-ThesisFF-image.jpg",
                "type": "image/jpeg",
                "size": 188500,
                "path": "Publication:kroesl-2019-ThesisFF",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2019/kroesl-2019-ThesisFF/kroesl-2019-ThesisFF-image.jpg",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/kroesl-2019-ThesisFF/kroesl-2019-ThesisFF-image:thumb{{size}}.png"
            },
            {
                "description": "submitted video",
                "filetitle": "video",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "kroesl-2019-ThesisFF-video.mp4",
                "type": "video/mp4",
                "size": 55490769,
                "path": "Publication:kroesl-2019-ThesisFF",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2019/kroesl-2019-ThesisFF/kroesl-2019-ThesisFF-video.mp4",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/kroesl-2019-ThesisFF/kroesl-2019-ThesisFF-video:thumb{{size}}.png",
                "video_mp4": "https://www.cg.tuwien.ac.at/research/publications/2019/kroesl-2019-ThesisFF/kroesl-2019-ThesisFF-video:video.mp4"
            }
        ],
        "projects_workgroups": [
            "abteilung"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2019/kroesl-2019-ThesisFF/",
        "__class": "Publication"
    },
    {
        "id": "plank-2017-sldg",
        "type_id": "masterthesis",
        "tu_id": null,
        "repositum_id": "20.500.12708/8559",
        "title": "Effective Line Drawing Generation",
        "date": "2019-05-27",
        "abstract": "Advanced rendering algorithms such as suggestive contours are able to depict objects in the style of line drawings with various levels of detail. How to select an appropriate level of detail is based on visual aesthetics rather than on substantial characteristics like the accuracy of 3D shape perception. The aim of this thesis is to develop a novel approach for effectively generating line drawings in the style of suggestive contours that are optimized for human 3D shape perception while retaining the amount of ink to a minimum. The proposed post-processing meta-heuristic for optimizing line drawings uses empirical thresholds based on probing human shape perception. The heuristic can also\nbe used to optimize line drawings in terms of other visual characteristics, e.g., cognitive load, and for other line drawings styles such as ridges and valleys.\nThe optimization routine is based on a conducted perceptual user study using the gauge figure task to collect more than 17, 000 high-quality user estimates of surface normals from suggestive contours renderings. By analyzing these data points, more in-depth understanding of how humans perceive 3D shape from line drawings is gained. Particularly the accuracy of 3D shape perception and shape ambiguity in regards to changing the level of detail and type of object presented is investigated. In addition, the collected data points are used to calculate two pixel-based perceptual characteristics: the optimal size of a local neighborhood area to estimate 3D shape from and the optimal local ink percentage in this area.\nIn the analysis, a neighborhood size of 36 pixels with an optimal ink percentage of\n17.3% could be identified. 
These thresholds are used to optimize suggestive contours\nrenderings in a post-processing stage using a greedy nearest neighbor optimization scheme.\nThe proposed meta-heuristic procedure yields visually convincing results where each\npixel value is close to the identified thresholds. In terms of practical application, the optimization scheme can be used in areas where high 3D shape understanding is essential such as furniture manuals or architectural renderings. Both the empirical results regarding shape understanding as well as the practical applications of the thesis’s results form the basis to optimize other line drawing methods and to understand better how humans\nperceive shape from lines.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": true,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 192,
            "image_height": 190,
            "name": "plank-2017-sldg-image.JPG",
            "type": "image/jpeg",
            "size": 16355,
            "path": "Publication:plank-2017-sldg",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2019/plank-2017-sldg/plank-2017-sldg-image.JPG",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/plank-2017-sldg/plank-2017-sldg-image:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1293
        ],
        "date_end": "2019-05-27",
        "date_start": "2017-07-01",
        "diploma_examina": "2019-05-27",
        "doi": "10.34726/hss.2018.56226",
        "matrikelnr": "1225804",
        "open_access": "yes",
        "pages": "84",
        "supervisor": [
            171
        ],
        "research_areas": [
            "IllVis",
            "Perception"
        ],
        "keywords": [
            "visualization"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "image",
                "main_file": true,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 192,
                "image_height": 190,
                "name": "plank-2017-sldg-image.JPG",
                "type": "image/jpeg",
                "size": 16355,
                "path": "Publication:plank-2017-sldg",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2019/plank-2017-sldg/plank-2017-sldg-image.JPG",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/plank-2017-sldg/plank-2017-sldg-image:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "Master Thesis",
                "main_file": true,
                "use_in_gallery": true,
                "access": "public",
                "name": "plank-2017-sldg-Master Thesis.pdf",
                "type": "application/pdf",
                "size": 17505561,
                "path": "Publication:plank-2017-sldg",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2019/plank-2017-sldg/plank-2017-sldg-Master Thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/plank-2017-sldg/plank-2017-sldg-Master Thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "illvisation"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2019/plank-2017-sldg/",
        "__class": "Publication"
    },
    {
        "id": "schuller_reichl-2019-avt",
        "type_id": "masterthesis",
        "tu_id": 283046,
        "repositum_id": null,
        "title": "Mapping of Realism in Rendering onto Perception of Presence in Augmented Reality",
        "date": "2019-03",
        "abstract": null,
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1725
        ],
        "date_end": "2019",
        "date_start": "2017",
        "matrikelnr": "00825849",
        "supervisor": [
            378
        ],
        "research_areas": [
            "Perception",
            "VR"
        ],
        "keywords": [],
        "weblinks": [],
        "files": [],
        "projects_workgroups": [
            "vr"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2019/schuller_reichl-2019-avt/",
        "__class": "Publication"
    },
    {
        "id": "Vasylevska_Khrystyna-2019-TEFVR",
        "type_id": "inproceedings",
        "tu_id": 279159,
        "repositum_id": null,
        "title": "Towards Eye-Friendly VR: How Bright Should It Be?",
        "date": "2019-03",
        "abstract": "Visual information plays an important part in the perception of the world around us. Recently, head-mounted displays (HMD) came to the consumer market and became a part of the everyday life of thousands of people. Like with the desktop screens or hand-held devices before, the public is concerned with the possible health consequences of the prolonged usage and question the adequacy of the default settings. It has been shown that the brightness and contrast of a display should be adjusted to match the external light to decrease eye strain and other symptoms. Currently, there is a noticeable mismatch in brightness between the screen and dark background of an HMD that might cause eye strain, insomnia, and other unpleasant symptoms.\n\nIn this paper, we explore the possibility to significantly lower the screen brightness in the HMD and successfully compensate for the loss of the visual information on a dimmed screen. We designed a user study to explore the connection between the screen brightness HMD and task performance, cybersickness, users’ comfort, and preferences. We have tested three levels of brightness: the default Full Brightness, the optional Night Mode and a significantly lower brightness with original content and compensated content.   Our results suggest that although users still prefer the brighter setting, the HMDs can be successfully used with significantly lower screen brightness, especially if the low screen brightness is compensated",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1712,
            1713,
            1714,
            378
        ],
        "booktitle": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
        "cfp": {
            "name": "IEEE VR 2019 Call for Conference Papers.pdf",
            "type": "application/pdf",
            "error": "0",
            "size": "312223",
            "orig_name": "IEEE VR 2019 Call for Conference Papers.pdf",
            "ext": "pdf"
        },
        "doi": "10.1109/VR.2019.8797752",
        "event": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
        "issn": "2642-5246 ",
        "lecturer": [
            1712
        ],
        "location": "Osaka, Japan",
        "open_access": "yes",
        "pages_from": "1",
        "pages_to": "9",
        "publisher": "IEEE",
        "research_areas": [
            "Perception"
        ],
        "keywords": [
            "Virtual Reality",
            "User Study",
            "Perception",
            " Head-Mounted Display"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "paper",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "Vasylevska_Khrystyna-2019-TEFVR-paper.pdf",
                "type": "application/pdf",
                "size": 2620917,
                "path": "Publication:Vasylevska_Khrystyna-2019-TEFVR",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2019/Vasylevska_Khrystyna-2019-TEFVR/Vasylevska_Khrystyna-2019-TEFVR-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/Vasylevska_Khrystyna-2019-TEFVR/Vasylevska_Khrystyna-2019-TEFVR-paper:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "vr"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2019/Vasylevska_Khrystyna-2019-TEFVR/",
        "__class": "Publication"
    },
    {
        "id": "kroesl-2019-ICthroughVR",
        "type_id": "inproceedings",
        "tu_id": 283362,
        "repositum_id": null,
        "title": "ICthroughVR: Illuminating Cataracts through Virtual Reality",
        "date": "2019-03",
        "abstract": "Vision impairments, such as cataracts, affect how many people interact with their environment, yet are rarely considered by architects and lighting designers because of a lack of design tools. To address this, we present a method to simulate vision impairments caused by cataracts in virtual reality (VR), using eye tracking for gaze-dependent effects. We conducted a user study to investigate how lighting affects visual perception for users with cataracts. Unlike past approaches, we account for the user's vision and some constraints of VR headsets, allowing for calibration of our simulation to the same level of degraded vision for all participants.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": true,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 828,
            "image_height": 828,
            "name": "kroesl-2019-ICthroughVR-image.png",
            "type": "image/png",
            "size": 700309,
            "path": "Publication:kroesl-2019-ICthroughVR",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2019/kroesl-2019-ICthroughVR/kroesl-2019-ICthroughVR-image.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/kroesl-2019-ICthroughVR/kroesl-2019-ICthroughVR-image:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1030,
            1633,
            1636,
            1635,
            193,
            1634
        ],
        "booktitle": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces",
        "cfp": {
            "name": "IEEE VR 2019 Call for Conference Papers.pdf",
            "type": "application/pdf",
            "error": "0",
            "size": "226992",
            "orig_name": "IEEE VR 2019 Call for Conference Papers.pdf",
            "ext": "pdf"
        },
        "date_from": "2019-03-23",
        "date_to": "2019-03-27",
        "doi": "10.1109/VR.2019.8798239",
        "event": "IEEE VR 2019, the 26th IEEE Conference on Virtual Reality and 3D User Interfaces",
        "lecturer": [
            1030
        ],
        "location": "Osaka, Japan",
        "pages_from": "655",
        "pages_to": "663",
        "publisher": "IEEE",
        "research_areas": [
            "Perception",
            "Rendering"
        ],
        "keywords": [
            "vision impairments",
            "cataracts",
            "virtual reality",
            "user study"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "image",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 828,
                "image_height": 828,
                "name": "kroesl-2019-ICthroughVR-image.png",
                "type": "image/png",
                "size": 700309,
                "path": "Publication:kroesl-2019-ICthroughVR",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2019/kroesl-2019-ICthroughVR/kroesl-2019-ICthroughVR-image.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/kroesl-2019-ICthroughVR/kroesl-2019-ICthroughVR-image:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "paper_preprint",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "kroesl-2019-ICthroughVR-paper_preprint.pdf",
                "type": "application/pdf",
                "size": 21654478,
                "path": "Publication:kroesl-2019-ICthroughVR",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2019/kroesl-2019-ICthroughVR/kroesl-2019-ICthroughVR-paper_preprint.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/kroesl-2019-ICthroughVR/kroesl-2019-ICthroughVR-paper_preprint:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "video",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "kroesl-2019-ICthroughVR-video.mp4",
                "type": "video/mp4",
                "size": 59381889,
                "path": "Publication:kroesl-2019-ICthroughVR",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2019/kroesl-2019-ICthroughVR/kroesl-2019-ICthroughVR-video.mp4",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/kroesl-2019-ICthroughVR/kroesl-2019-ICthroughVR-video:thumb{{size}}.png",
                "video_mp4": "https://www.cg.tuwien.ac.at/research/publications/2019/kroesl-2019-ICthroughVR/kroesl-2019-ICthroughVR-video:video.mp4"
            }
        ],
        "projects_workgroups": [
            "rend",
            "VRVis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2019/kroesl-2019-ICthroughVR/",
        "__class": "Publication"
    },
    {
        "id": "trautner-2018-imd",
        "type_id": "masterthesis",
        "tu_id": null,
        "repositum_id": "20.500.12708/7274",
        "title": "Importance-Driven Exploration of Molecular Dynamics Simulations",
        "date": "2018-10-03",
        "abstract": "The aim of this thesis is a novel real-time visualization approach for exploring molecular dynamics (MD-)simulations. Through the constantly improving hardware and ever-increasing computing power, MD-simulations are more easily available. Additionally, they consist of hundreds, thousands or even millions of individual simulation frames and are getting more and more detailed. The calculation of such simulations is no longer limited by algorithms or hardware, nevertheless it is still not possible to efficiently explore this huge amount of simulation data, as animated 3D visualization, with ordinary and well established visualization tools. Using current software tools, the exploration of such long simulations takes too much time and due to the complexity of large molecular scenes, the visualizations highly suffer from visual clutter. It is therefore very likely that the user will miss important events.\nTherefore, we designed a focus & context approach for MD-simulations that guides the user to the most relevant temporal and spatial events, and it is no longer necessary to explore the simulation in a linear fashion. Our contribution can be divided into the following four topics:\n1. Spatial importance through different levels of detail. Depending on the type of research task, different geometrical representations can be selected for both, focus- and context elements.\n2. Importance driven visibility management through ghosting, to prevent context elements from occluding focus elements.\n3. Temporal importance through adaptive fast-forward. The playback speed of the simulation is thereby dependent on a single or a combination of multiple importance functions.\n4. Visual declutter of accumulated frames through motion blur, which additionally illustrates the playback speed-up.\nSince the very beginning, this work was developed in close cooperation with biochemists from the Loschmidt Laboratories in Brno, Czech Republic. Together, we analyzed different use cases demonstrating the flexibility of our novel focus & context approach.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": true,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 567,
            "image_height": 480,
            "name": "trautner-2018-imd-image.png",
            "type": "image/png",
            "size": 278645,
            "path": "Publication:trautner-2018-imd",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2018/trautner-2018-imd/trautner-2018-imd-image.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2018/trautner-2018-imd/trautner-2018-imd-image:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1196
        ],
        "date_end": "2018-11-03",
        "date_start": "2018-01",
        "doi": "10.34726/hss.2018.53541",
        "matrikelnr": "01125421",
        "open_access": "yes",
        "pages": "100",
        "supervisor": [
            1110,
            166
        ],
        "research_areas": [
            "BioVis",
            "Perception"
        ],
        "keywords": [
            "molecular dynamics simulation",
            "realtime visualization"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "image",
                "main_file": true,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 567,
                "image_height": 480,
                "name": "trautner-2018-imd-image.png",
                "type": "image/png",
                "size": 278645,
                "path": "Publication:trautner-2018-imd",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2018/trautner-2018-imd/trautner-2018-imd-image.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2018/trautner-2018-imd/trautner-2018-imd-image:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "Master Thesis",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "trautner-2018-imd-Master Thesis.pdf",
                "type": "application/pdf",
                "size": 18870686,
                "path": "Publication:trautner-2018-imd",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2018/trautner-2018-imd/trautner-2018-imd-Master Thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2018/trautner-2018-imd/trautner-2018-imd-Master Thesis:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "Poster",
                "main_file": true,
                "use_in_gallery": true,
                "access": "public",
                "name": "trautner-2018-imd-Poster.pdf",
                "type": "application/pdf",
                "size": 4239145,
                "path": "Publication:trautner-2018-imd",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2018/trautner-2018-imd/trautner-2018-imd-Poster.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2018/trautner-2018-imd/trautner-2018-imd-Poster:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "vis",
            "deskollage"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2018/trautner-2018-imd/",
        "__class": "Publication"
    },
    {
        "id": "kroesl-2018-DC",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": null,
        "title": "[DC] Computational Design of Smart Lighting Systems for Visually Impaired People, using VR and AR Simulations",
        "date": "2018-10",
        "abstract": "This Doctoral Consortium paper presents my dissertation research in a multidisciplinary setting, spanning over the areas of architecture, specifically lighting design and building information modeling, to virtual reality (VR) and perception.\nSince vision impairments are hardly taken into account in architecture and lighting design today, this research aims to provide the necessary tools to quantify the effects of vision impairments, so design guidelines regarding these impairments can be developed.\nAnother research goal is the determination of the influence of different lighting conditions on the perception of people with vision impairments.\nThis would allow us to develop smart lighting systems that can aid visually impaired people by increasing their visual perception of their environment.\nThis paper also outlines the concept for a tool to automatically generate lighting solutions and compare and test them in VR, as design aid for architects and lighting designers.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 642,
            "image_height": 595,
            "name": "kroesl-2018-DC-image.png",
            "type": "image/png",
            "size": 316715,
            "path": "Publication:kroesl-2018-DC",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2018/kroesl-2018-DC/kroesl-2018-DC-image.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2018/kroesl-2018-DC/kroesl-2018-DC-image:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1030
        ],
        "booktitle": "Proceedings of the 2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
        "event": "ISMAR 2018",
        "lecturer": [
            1030
        ],
        "location": "Munich",
        "open_access": "no",
        "publisher": "IEEE",
        "research_areas": [
            "Perception",
            "Rendering"
        ],
        "keywords": [
            "vision impairments",
            "lighting design",
            "virtual reality",
            "user study"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "image",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 642,
                "image_height": 595,
                "name": "kroesl-2018-DC-image.png",
                "type": "image/png",
                "size": 316715,
                "path": "Publication:kroesl-2018-DC",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2018/kroesl-2018-DC/kroesl-2018-DC-image.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2018/kroesl-2018-DC/kroesl-2018-DC-image:thumb{{size}}.png"
            },
            {
                "description": "author's version",
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "kroesl-2018-DC-paper.pdf",
                "type": "application/pdf",
                "size": 3617601,
                "path": "Publication:kroesl-2018-DC",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2018/kroesl-2018-DC/kroesl-2018-DC-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2018/kroesl-2018-DC/kroesl-2018-DC-paper:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "rend",
            "VRVis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2018/kroesl-2018-DC/",
        "__class": "Publication"
    },
    {
        "id": "kroesl-2018-TVS",
        "type_id": "poster",
        "tu_id": null,
        "repositum_id": null,
        "title": "The Virtual Schoolyard: Attention Training in Virtual Reality for Children with Attentional Disorders",
        "date": "2018-08",
        "abstract": "This work presents a virtual reality simulation for training different attentional abilities in children and adolescents. In an interdisciplinary project between psychology and computer science, we developed four mini-games that are used during therapy sessions to battle different aspects of attentional disorders. First experiments show that the immersive game-like application is well received by children. Our tool is also currently part of a treatment program in an ongoing clinical study.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1500,
            "image_height": 1000,
            "name": "kroesl-2018-TVS-.png",
            "type": "image/png",
            "size": 2314610,
            "path": "Publication:kroesl-2018-TVS",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2018/kroesl-2018-TVS/kroesl-2018-TVS-.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2018/kroesl-2018-TVS/kroesl-2018-TVS-:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1030,
            1599,
            1600,
            1601,
            1602,
            193,
            1603
        ],
        "cfp": {
            "name": "call_for_posters.pdf",
            "type": "application/pdf",
            "error": "0",
            "size": "630843",
            "orig_name": "call_for_posters.pdf",
            "ext": "pdf"
        },
        "date_from": "2018-08-12",
        "date_to": "2018-08-16",
        "doi": "10.1145/3230744.3230817",
        "event": "ACM SIGGRAPH 2018",
        "isbn": "978-1-4503-5817-0",
        "location": "Vancouver, Canada",
        "pages_from": "Article 27",
        "publisher": "ACM",
        "research_areas": [
            "Perception",
            "Rendering"
        ],
        "keywords": [
            "virtual reality",
            "attentional disorders",
            "user study"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1500,
                "image_height": 1000,
                "name": "kroesl-2018-TVS-.png",
                "type": "image/png",
                "size": 2314610,
                "path": "Publication:kroesl-2018-TVS",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2018/kroesl-2018-TVS/kroesl-2018-TVS-.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2018/kroesl-2018-TVS/kroesl-2018-TVS-:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "abstract",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "kroesl-2018-TVS-abstract.pdf",
                "type": "application/pdf",
                "size": 20280477,
                "path": "Publication:kroesl-2018-TVS",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2018/kroesl-2018-TVS/kroesl-2018-TVS-abstract.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2018/kroesl-2018-TVS/kroesl-2018-TVS-abstract:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "poster",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "preview_image_width": 900,
                "preview_image_height": 1200,
                "name": "kroesl-2018-TVS-poster.pdf",
                "type": "application/pdf",
                "size": 5352291,
                "path": "Publication:kroesl-2018-TVS",
                "preview_name": "kroesl-2018-TVS-poster:preview.png",
                "preview_type": "image/png",
                "preview_size": 1288394,
                "url": "https://www.cg.tuwien.ac.at/research/publications/2018/kroesl-2018-TVS/kroesl-2018-TVS-poster.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2018/kroesl-2018-TVS/kroesl-2018-TVS-poster:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "video",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "preview_image_width": 853,
                "preview_image_height": 481,
                "name": "kroesl-2018-TVS-video.mp4",
                "type": "video/mp4",
                "size": 161407034,
                "path": "Publication:kroesl-2018-TVS",
                "preview_name": "kroesl-2018-TVS-video:preview.png",
                "preview_type": "image/png",
                "preview_size": 105824,
                "url": "https://www.cg.tuwien.ac.at/research/publications/2018/kroesl-2018-TVS/kroesl-2018-TVS-video.mp4",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2018/kroesl-2018-TVS/kroesl-2018-TVS-video:thumb{{size}}.png",
                "video_mp4": "https://www.cg.tuwien.ac.at/research/publications/2018/kroesl-2018-TVS/kroesl-2018-TVS-video:video.mp4"
            }
        ],
        "projects_workgroups": [
            "rend",
            "VRVis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2018/kroesl-2018-TVS/",
        "__class": "Publication"
    },
    {
        "id": "HECHER-2017-HDY",
        "type_id": "journalpaper_notalk",
        "tu_id": null,
        "repositum_id": null,
        "title": "How Do Users Map Points Between Dissimilar Shapes?",
        "date": "2018-08",
        "abstract": "Finding similar points in globally or locally similar shapes has been studied extensively through the use of various point descriptors or shape-matching methods. However, little work exists on finding similar points in dissimilar shapes. In this paper, we present the results of a study where users were given two dissimilar two-dimensional shapes and asked to map a given point in the first shape to the point in the second shape they consider most similar. We find that user mappings in this study correlate strongly with simple geometric relationships between points and shapes. To predict the probability distribution of user mappings between any pair of simple two-dimensional shapes, two distinct statistical models are defined using these relationships. We perform a thorough validation of the accuracy of these predictions and compare our models qualitatively and quantitatively to well-known shape-matching methods. Using our predictive models, we propose an approach to map objects or procedural content between different shapes in different design scenarios.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1323,
            "image_height": 742,
            "name": "HECHER-2017-HDY-image.png",
            "type": "image/png",
            "size": 1637744,
            "path": "Publication:HECHER-2017-HDY",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2018/HECHER-2017-HDY/HECHER-2017-HDY-image.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2018/HECHER-2017-HDY/HECHER-2017-HDY-image:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            779,
            627,
            194,
            193
        ],
        "doi": "10.1109/TVCG.2017.2730877",
        "issn": "1077-2626",
        "journal": "IEEE Transactions on Visualization and Computer Graphics",
        "number": "8",
        "open_access": "no",
        "pages_from": "2327",
        "pages_to": "2338",
        "volume": "24",
        "research_areas": [
            "Modeling",
            "Perception"
        ],
        "keywords": [
            "shape matching",
            "transformations",
            "shape similarity"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "draft",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "HECHER-2017-HDY-draft.pdf",
                "type": "application/pdf",
                "size": 13870249,
                "path": "Publication:HECHER-2017-HDY",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2018/HECHER-2017-HDY/HECHER-2017-HDY-draft.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2018/HECHER-2017-HDY/HECHER-2017-HDY-draft:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "image",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1323,
                "image_height": 742,
                "name": "HECHER-2017-HDY-image.png",
                "type": "image/png",
                "size": 1637744,
                "path": "Publication:HECHER-2017-HDY",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2018/HECHER-2017-HDY/HECHER-2017-HDY-image.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2018/HECHER-2017-HDY/HECHER-2017-HDY-image:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "rend"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2018/HECHER-2017-HDY/",
        "__class": "Publication"
    },
    {
        "id": "Kathi-2018-VRB",
        "type_id": "journalpaper",
        "tu_id": null,
        "repositum_id": null,
        "title": "A VR-based user study on the effects of vision impairments on recognition distances of escape-route signs in buildings",
        "date": "2018-04-30",
        "abstract": "In workplaces or publicly accessible buildings, escape routes are signposted according to official norms or international standards that specify distances, angles and areas of interest for the positioning of escape-route signs. In homes for the elderly, in which the residents commonly have degraded mobility and suffer from vision impairments caused by age or eye diseases, the specifications of current norms and standards may be insufficient. Quantifying the effect of symptoms of vision impairments like reduced visual acuity on recognition distances is challenging, as it is cumbersome to find a large number of user study participants who suffer from exactly the same form of vision impairments. Hence, we propose a new methodology for such user studies: By conducting a user study in virtual reality (VR), we are able to use participants with normal or corrected sight and simulate vision impairments graphically. The use of standardized medical eyesight tests in VR allows us to calibrate the visual acuity of all our participants to the same level, taking their respective visual acuity into account. Since we primarily focus on homes for the elderly, we accounted for their often limited mobility by implementing a wheelchair simulation for our VR application.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 327,
            "image_height": 327,
            "name": "Kathi-2018-VRB-image.JPG",
            "type": "image/jpeg",
            "size": 24451,
            "path": "Publication:Kathi-2018-VRB",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2018/Kathi-2018-VRB/Kathi-2018-VRB-image.JPG",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2018/Kathi-2018-VRB/Kathi-2018-VRB-image:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1030,
            1551,
            678,
            1492,
            193,
            1559
        ],
        "date_from": "2018-06-11",
        "date_to": "2018-06-14",
        "doi": "10.1007/s00371-018-1517-7",
        "event": "Computer Graphics International (CGI)",
        "issn": "0178-2789",
        "journal": "The Visual Computer",
        "lecturer": [
            1030
        ],
        "location": "Bintan, Indonesia",
        "number": "6-8",
        "open_access": "yes",
        "pages_from": "911",
        "pages_to": "923",
        "volume": "34",
        "research_areas": [
            "Perception",
            "Rendering",
            "VR"
        ],
        "keywords": [],
        "weblinks": [
            {
                "href": "https://link.springer.com/article/10.1007%2Fs00371-018-1517-7",
                "caption": null,
                "description": null,
                "main_file": 0
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "image",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 327,
                "image_height": 327,
                "name": "Kathi-2018-VRB-image.JPG",
                "type": "image/jpeg",
                "size": 24451,
                "path": "Publication:Kathi-2018-VRB",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2018/Kathi-2018-VRB/Kathi-2018-VRB-image.JPG",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2018/Kathi-2018-VRB/Kathi-2018-VRB-image:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "Paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "Kathi-2018-VRB-Paper.pdf",
                "type": "application/pdf",
                "size": 1077352,
                "path": "Publication:Kathi-2018-VRB",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2018/Kathi-2018-VRB/Kathi-2018-VRB-Paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2018/Kathi-2018-VRB/Kathi-2018-VRB-Paper:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "rend",
            "VRVis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2018/Kathi-2018-VRB/",
        "__class": "Publication"
    },
    {
        "id": "polatsek-2018-stv",
        "type_id": "journalpaper_notalk",
        "tu_id": null,
        "repositum_id": null,
        "title": "Exploring visual attention and saliency modeling for task-based visual analysis",
        "date": "2018-02",
        "abstract": "Memory, visual attention and perception play a critical role in the design of visualizations. The way users observe a visualization is affected by salient stimuli in a scene as well as by domain knowledge, interest, and the task. While recent saliency models manage to predict the users’ visual attention in visualizations during exploratory analysis, there is little evidence how much influence bottom-up saliency has on task-based visual analysis. Therefore, we performed an eye-tracking study with 47 users to determine the users’ path of attention when solving three low-level analytical tasks using 30 different charts from the MASSVIS database [1]. We also compared our task-based eye tracking data to the data from the original memorability experiment by Borkin et al. [2]. We found that solving a task leads to more consistent viewing patterns compared to exploratory visual analysis. However, bottom-up saliency of a visualization has negligible influence on users’ fixations and task efficiency when performing a low-level analytical task. Also, the efficiency of visual search for an extreme target data point is barely influenced by the target’s bottom-up saliency. Therefore, we conclude that bottom-up saliency models tailored towards information visualization are not suitable for predicting visual attention when performing task-based visual analysis. We discuss potential reasons and suggest extensions to visual attention models to better account for task-based visual analysis.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "graphical abstract",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 591,
            "image_height": 584,
            "name": "polatsek-2018-stv-graphical abstract.png",
            "type": "image/png",
            "size": 193042,
            "path": "Publication:polatsek-2018-stv",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2018/polatsek-2018-stv/polatsek-2018-stv-graphical abstract.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2018/polatsek-2018-stv/polatsek-2018-stv-graphical abstract:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1545,
            1110,
            171,
            1546,
            1547
        ],
        "doi": "10.1016/j.cag.2018.01.010",
        "journal": "Computers & Graphics",
        "number": "2",
        "open_access": "no",
        "research_areas": [
            "InfoVis",
            "Perception"
        ],
        "keywords": [
            "Information visualization",
            "Eye-tracking experiment",
            "Saliency",
            "Visual attention",
            "Low-level analytical tasks"
        ],
        "weblinks": [
            {
                "href": "https://www.sciencedirect.com/science/article/pii/S0097849318300104",
                "caption": null,
                "description": null,
                "main_file": 0
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "graphical abstract",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 591,
                "image_height": 584,
                "name": "polatsek-2018-stv-graphical abstract.png",
                "type": "image/png",
                "size": 193042,
                "path": "Publication:polatsek-2018-stv",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2018/polatsek-2018-stv/polatsek-2018-stv-graphical abstract.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2018/polatsek-2018-stv/polatsek-2018-stv-graphical abstract:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "polatsek-2018-stv-paper.pdf",
                "type": "application/pdf",
                "size": 3228380,
                "path": "Publication:polatsek-2018-stv",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2018/polatsek-2018-stv/polatsek-2018-stv-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2018/polatsek-2018-stv/polatsek-2018-stv-paper:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "vis",
            "deskollage"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2018/polatsek-2018-stv/",
        "__class": "Publication"
    },
    {
        "id": "waldin-2017-thesis",
        "type_id": "phdthesis",
        "tu_id": null,
        "repositum_id": null,
        "title": "Using and Adapting to Limits of Human Perception in Visualization",
        "date": "2017-11",
        "abstract": null,
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1461
        ],
        "duration": "3 years",
        "reviewer_1": [
            1526
        ],
        "reviewer_2": [
            1313
        ],
        "rigorosum": "2017-11-06",
        "supervisor": [
            171
        ],
        "research_areas": [
            "Perception"
        ],
        "keywords": [],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "thesis",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "waldin-2017-thesis-thesis.pdf",
                "type": "application/pdf",
                "size": 3029725,
                "path": "Publication:waldin-2017-thesis",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2017/waldin-2017-thesis/waldin-2017-thesis-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2017/waldin-2017-thesis/waldin-2017-thesis-thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "illvisation"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2017/waldin-2017-thesis/",
        "__class": "Publication"
    },
    {
        "id": "KREUZER-2017-PBF",
        "type_id": "masterthesis",
        "tu_id": null,
        "repositum_id": null,
        "title": "Using Perception-Based Filtering to Hide Shadow Artifacts",
        "date": "2017-10-05",
        "abstract": "Shadows are an indispensable aid for understanding spatial relations of objects in\nnatural scenes, which is why they are very important for real-time rendering applications.\nCombining filtering techniques with shadow mapping is a common tool to simulate\nvisually-pleasing shadows in interactive applications. A positive effect of such approaches\nis that the filtering blurs aliasing artifacts caused by sampling the discretized geometric\ndata stored in the shadow map, thereby improving the visual quality of the shadow.\nThe goal of this thesis is to exploit common filtering algorithms, in order to find a\nfunction of blur radius and shadow-map sampling frequency, which allows for optimized\ncomputational performance while mostly preserving the visual quality of the shadow.\nIn the course of this work, we investigate how shadow artifacts arise and how to hide\nthem. We set up and execute a user study to find the optimal relation between the\nshadow-map sampling frequency and the filter radius. From the results of the user study,\nwe derive a formula and develop an algorithm that can be incorporated into existing\nshadow-mapping algorithms. We evaluate our results by applying the algorithm to a\ncustom-made rendering framework and observe an increase in processing speeds.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 542,
            "image_height": 454,
            "name": "KREUZER-2017-PBF-image.png",
            "type": "image/png",
            "size": 112349,
            "path": "Publication:KREUZER-2017-PBF",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2017/KREUZER-2017-PBF/KREUZER-2017-PBF-image.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2017/KREUZER-2017-PBF/KREUZER-2017-PBF-image:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1046
        ],
        "date_end": "2017-10-05",
        "date_start": "2016-06-01",
        "diploma_examina": "2017-10-05",
        "matrikelnr": "0827433",
        "supervisor": [
            193,
            779
        ],
        "research_areas": [
            "Perception",
            "Rendering"
        ],
        "keywords": [
            "soft shadows"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "image",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 542,
                "image_height": 454,
                "name": "KREUZER-2017-PBF-image.png",
                "type": "image/png",
                "size": 112349,
                "path": "Publication:KREUZER-2017-PBF",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2017/KREUZER-2017-PBF/KREUZER-2017-PBF-image.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2017/KREUZER-2017-PBF/KREUZER-2017-PBF-image:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "poster",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "KREUZER-2017-PBF-poster.pdf",
                "type": "application/pdf",
                "size": 2166045,
                "path": "Publication:KREUZER-2017-PBF",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2017/KREUZER-2017-PBF/KREUZER-2017-PBF-poster.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2017/KREUZER-2017-PBF/KREUZER-2017-PBF-poster:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "thesis",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "KREUZER-2017-PBF-thesis.pdf",
                "type": "application/pdf",
                "size": 9488073,
                "path": "Publication:KREUZER-2017-PBF",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2017/KREUZER-2017-PBF/KREUZER-2017-PBF-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2017/KREUZER-2017-PBF/KREUZER-2017-PBF-thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "rend"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2017/KREUZER-2017-PBF/",
        "__class": "Publication"
    },
    {
        "id": "mindek-2017-dsn",
        "type_id": "journalpaper",
        "tu_id": null,
        "repositum_id": null,
        "title": "Data-Sensitive Visual Navigation",
        "date": "2017-10",
        "abstract": "In visualization systems it is often the case that the changes of the input parameters are not proportional to the visual change of the generated output. In this paper, we propose a model for enabling data-sensitive navigation for user-interface elements. This model is applied to normalize the user input according to the visual change, and also to visually communicate this normalization. In this way, the exploration of heterogeneous data using common interaction elements can be performed in an efficient way. We apply our model to the field of medical visualization and present guided navigation tools for traversing vascular structures and for camera rotation around 3D volumes. The presented examples demonstrate that the model scales to user-interface elements where multiple parameters are set simultaneously.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 1020,
            "image_height": 1020,
            "name": "mindek-2017-dsn-.png",
            "type": "image/png",
            "size": 700890,
            "path": "Publication:mindek-2017-dsn",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2017/mindek-2017-dsn/mindek-2017-dsn-.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2017/mindek-2017-dsn/mindek-2017-dsn-:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            935,
            869,
            166,
            161
        ],
        "date_from": "2017-05-15",
        "date_to": "2017-05-17",
        "event": "SCCG 2017",
        "journal": "Computers & Graphics",
        "lecturer": [
            166
        ],
        "location": "Mikulov, Czech Republic",
        "number": "C",
        "pages_from": "77",
        "pages_to": "85",
        "volume": "67",
        "research_areas": [
            "MedVis",
            "Perception"
        ],
        "keywords": [
            "navigation",
            "exploration",
            "medical visualization"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 1020,
                "image_height": 1020,
                "name": "mindek-2017-dsn-.png",
                "type": "image/png",
                "size": 700890,
                "path": "Publication:mindek-2017-dsn",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2017/mindek-2017-dsn/mindek-2017-dsn-.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2017/mindek-2017-dsn/mindek-2017-dsn-:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "Paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "mindek-2017-dsn-Paper.pdf",
                "type": "application/pdf",
                "size": 952068,
                "path": "Publication:mindek-2017-dsn",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2017/mindek-2017-dsn/mindek-2017-dsn-Paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2017/mindek-2017-dsn/mindek-2017-dsn-Paper:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "illvisation"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2017/mindek-2017-dsn/",
        "__class": "Publication"
    },
    {
        "id": "ERLER-2017-HVR",
        "type_id": "masterthesis",
        "tu_id": null,
        "repositum_id": null,
        "title": "Haptic Feedback in Room-Scale VR",
        "date": "2017-07-18",
        "abstract": "Virtual reality (VR) is now becoming a mainstream medium. Current systems like the HTC Vive offer accurate tracking of the HMD and controllers, which allows for highly immersive interactions with the virtual environment. The interactions can be further enhanced by adding feedback. As an example, a controller can vibrate when it is close to a grabbable ball. \n\nAs such interactions are not exhaustingly researched, we conducted a user study. Specifically, we examine:\n\n- grabbing and throwing with controllers in a simple basketball game.\n\n- the influence of haptic and optical feedback on performance, presence, task load, and usability.\n\n- the advantages of VR over desktop for point-cloud editing.\n\nSeveral new techniques emerged from the point-cloud editor for VR. The bi-manual pinch gesture, which extends the handlebar metaphor, is a novel viewing method used to translate, rotate, and scale the point-cloud. Our new rendering technique uses the geometry shader to draw sparse point clouds quickly. The selection volumes at the controllers are our new technique to efficiently select points in point clouds. The resulting selection is visualized in real time.\n\nThe results of the user study show that:\n\n- grabbing with a controller button is intuitive but throwing is not. Releasing a button is a bad metaphor for releasing a grabbed virtual object in order to throw it.\n\n- any feedback is better than none. Adding haptic, optical, or both feedback types to the grabbing improves the user performance and presence. However, only sub-scores like accuracy and predictability are significantly improved. Usability and task load are mostly unaffected by feedback.\n\n- the point-cloud editing is significantly better in VR with the bi-manual pinch gesture and selection volumes than on the desktop with the orbiting camera and lasso selections.\n",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "point cloud selection",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1600,
            "image_height": 900,
            "name": "ERLER-2017-HVR-point cloud selection.png",
            "type": "image/png",
            "size": 920489,
            "path": "Publication:ERLER-2017-HVR",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2017/ERLER-2017-HVR/ERLER-2017-HVR-point cloud selection.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2017/ERLER-2017-HVR/ERLER-2017-HVR-point cloud selection:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1395
        ],
        "date_end": "2017",
        "date_start": "2016",
        "diploma_examina": "2017-08-17",
        "matrikelnr": "01426424",
        "supervisor": [
            193,
            1116
        ],
        "research_areas": [
            "Perception",
            "Rendering"
        ],
        "keywords": [
            "virtual reality, room-scale VR, throwing, grabbing, physics, basketball, haptic feedback, optical feedback, controllers, point cloud, point-cloud editing, presence, performance, usability, task load"
        ],
        "weblinks": [
            {
                "href": "https://github.com/ErlerPhilipp/VR_DA",
                "caption": "VR Apps Repo",
                "description": "This is the repo containing the basketball game, the feedback tester and the point cloud editor.",
                "main_file": 1
            },
            {
                "href": "https://bitbucket.org/PhErler/surveyanalyzer/src/master/",
                "caption": "Analyzer Repo",
                "description": "This is the self-written tool to analyze the survey results statistically.",
                "main_file": 1
            },
            {
                "href": "https://philipperler.net/2017/06/13/haptic-feedback-in-room-scale-vr/",
                "caption": "P++ VR DA",
                "description": null,
                "main_file": 0
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "point cloud selection",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1600,
                "image_height": 900,
                "name": "ERLER-2017-HVR-point cloud selection.png",
                "type": "image/png",
                "size": 920489,
                "path": "Publication:ERLER-2017-HVR",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2017/ERLER-2017-HVR/ERLER-2017-HVR-point cloud selection.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2017/ERLER-2017-HVR/ERLER-2017-HVR-point cloud selection:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "poster",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "ERLER-2017-HVR-poster.pdf",
                "type": "application/pdf",
                "size": 6221471,
                "path": "Publication:ERLER-2017-HVR",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2017/ERLER-2017-HVR/ERLER-2017-HVR-poster.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2017/ERLER-2017-HVR/ERLER-2017-HVR-poster:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "thesis",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "ERLER-2017-HVR-thesis.pdf",
                "type": "application/pdf",
                "size": 15825861,
                "path": "Publication:ERLER-2017-HVR",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2017/ERLER-2017-HVR/ERLER-2017-HVR-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2017/ERLER-2017-HVR/ERLER-2017-HVR-thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "rend",
            "VRVis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2017/ERLER-2017-HVR/",
        "__class": "Publication"
    },
    {
        "id": "waldner-2017-vph",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": null,
        "title": "Exploring Visual Prominence of Multi-Channel Highlighting in Visualizations",
        "date": "2017-05",
        "abstract": "Visualizations make rich use of multiple visual channels so that there are few resources left to make selected focus elements visually\ndistinct from their surrounding context. A large variety of highlighting techniques for visualizations has been presented in the past,\nbut there has been little systematic evaluation of the design space of highlighting. We explore highlighting from the perspective\nof visual marks and channels – the basic building blocks of visualizations that are directly controlled by visualization designers.\nWe present the results from two experiments, exploring the visual prominence of highlighted marks in scatterplots: First, using\nluminance as a single highlight channel, we found that visual prominence is mainly determined by the luminance difference between\nthe focus mark and the brightest context mark. The brightness differences between context marks and the overall brightness level\nhave negligible influence. Second, multi-channel highlighting using luminance and blur leads to a good trade-off between highlight\neffectiveness and aesthetics. From the results, we derive a simple highlight model to balance highlighting across multiple visual\nchannels and focus and context marks, respectively.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 160,
            "image_height": 124,
            "name": "waldner-2017-vph-.png",
            "type": "image/png",
            "size": 20447,
            "path": "Publication:waldner-2017-vph",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2017/waldner-2017-vph/waldner-2017-vph-.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2017/waldner-2017-vph/waldner-2017-vph-:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1110,
            925,
            166
        ],
        "booktitle": "Spring Conference on Computer Graphics 2017",
        "lecturer": [
            1110
        ],
        "research_areas": [
            "InfoVis",
            "Perception"
        ],
        "keywords": [
            "information visualization",
            "highlighting",
            "focus+context",
            "visual prominence"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 160,
                "image_height": 124,
                "name": "waldner-2017-vph-.png",
                "type": "image/png",
                "size": 20447,
                "path": "Publication:waldner-2017-vph",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2017/waldner-2017-vph/waldner-2017-vph-.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2017/waldner-2017-vph/waldner-2017-vph-:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "waldner-2017-vph-paper.pdf",
                "type": "application/pdf",
                "size": 1851221,
                "path": "Publication:waldner-2017-vph",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2017/waldner-2017-vph/waldner-2017-vph-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2017/waldner-2017-vph/waldner-2017-vph-paper:thumb{{size}}.png"
            },
            {
                "description": "Details about experiment design and results. ",
                "filetitle": "supplemental material",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "waldner-2017-vph-supplemental material.pdf",
                "type": "application/pdf",
                "size": 1396255,
                "path": "Publication:waldner-2017-vph",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2017/waldner-2017-vph/waldner-2017-vph-supplemental material.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2017/waldner-2017-vph/waldner-2017-vph-supplemental material:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "deskollage"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2017/waldner-2017-vph/",
        "__class": "Publication"
    },
    {
        "id": "Waldin_Nicholas_2017_FlickerObserver",
        "type_id": "journalpaper",
        "tu_id": null,
        "repositum_id": null,
        "title": "Flicker Observer Effect: Guiding Attention Through High Frequency Flicker in Images",
        "date": "2017-05",
        "abstract": "Drawing the user's gaze to an important item in an image or a graphical user interface is a common challenge. Usually, some form of highlighting is used, such as a clearly distinct color or a border around the item. Flicker can also be very salient, but is often perceived as annoying. In this paper, we explore high frequency flicker (60 to 72 Hz) to guide the user's attention in an image. At such high frequencies, the critical flicker frequency (CFF) threshold is reached, which makes the flicker appear to fuse into a stable signal. However, the CFF is not uniform across the visual field, but is higher in the peripheral vision at normal lighting conditions. Through experiments, we show that high frequency flicker can be easily detected by observers in the peripheral vision, but the signal is hardly visible in the foveal vision when users directly look at the flickering patch. We demonstrate that this property can be used to draw the user's attention to important image regions using a standard high refresh-rate computer monitor with minimal visible modifications to the image. In an uncalibrated visual search task, users could in a crowded image easily spot the specified search targets flickering with very high frequency. They also reported that high frequency flicker was distracting when they had to attend to another region, while it was hardly noticeable when looking at the flickering region itself.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1461,
            1110,
            171
        ],
        "date_from": "2014",
        "date_to": "2017",
        "event": "Eurographics 2017",
        "journal": "Computer Graphics Forum",
        "lecturer": [
            1339
        ],
        "number": "2",
        "pages_from": "467",
        "pages_to": "476",
        "volume": "36",
        "research_areas": [
            "Perception"
        ],
        "keywords": [],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "Waldin_Nicholas_2017_FlickerObserver-paper.pdf",
                "type": "application/pdf",
                "size": 6348247,
                "path": "Publication:Waldin_Nicholas_2017_FlickerObserver",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2017/Waldin_Nicholas_2017_FlickerObserver/Waldin_Nicholas_2017_FlickerObserver-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2017/Waldin_Nicholas_2017_FlickerObserver/Waldin_Nicholas_2017_FlickerObserver-paper:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "vis",
            "deskollage",
            "illvisation"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2017/Waldin_Nicholas_2017_FlickerObserver/",
        "__class": "Publication"
    },
    {
        "id": "Koszticsak-2017-ewt",
        "type_id": "bachelorthesis",
        "tu_id": null,
        "repositum_id": null,
        "title": "Generating Expressive Window Thumbnails through Seam Carving",
        "date": "2017",
        "abstract": "Thumbnails are used to display lists of open windows or tabs when switching between\nthem on computers and on mobile devices. These images make it easier to recognize the\nopened applications, and help to find the needed window quicker. Thumbnails however\nonly display a screenshot of the windows, so they get potentially confusing if there are\nmore opened windows or if the same application is opened multiple times. Depending\non the resolution of the display, the screenshot size decreases as the number of opened\nwindows increases. Furthermore, within the same application (like MS Office World)\nthe screenshots are similar in appearance (e.g. : white paper and tool bar), but the\nimportant text is not readable. There are several approaches that filter the important\nareas of the images to enhance the main region. In this bachelor thesis an application is\nimplemented that uses the above methods on screenshots. Screenshots of windows are\nreduced by cropping the irrelevant elements of the margin area using seam carving, i.e.\nby eliminating the non-important pixel paths; and by common down-sampling. As a\nresult the thumbnails show only relevant information, which makes them more expressive\nand easier to fulfill their purpose.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 858,
            "image_height": 481,
            "name": "Koszticsak-2017-ewt-.png",
            "type": "image/png",
            "size": 397429,
            "path": "Publication:Koszticsak-2017-ewt",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2017/Koszticsak-2017-ewt/Koszticsak-2017-ewt-.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2017/Koszticsak-2017-ewt/Koszticsak-2017-ewt-:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1351
        ],
        "date_end": "2017-02",
        "date_start": "2016-06",
        "duration": "10 months",
        "matrikelnr": "1325492",
        "supervisor": [
            1110
        ],
        "research_areas": [
            "IllVis",
            "Perception"
        ],
        "keywords": [],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 858,
                "image_height": 481,
                "name": "Koszticsak-2017-ewt-.png",
                "type": "image/png",
                "size": 397429,
                "path": "Publication:Koszticsak-2017-ewt",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2017/Koszticsak-2017-ewt/Koszticsak-2017-ewt-.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2017/Koszticsak-2017-ewt/Koszticsak-2017-ewt-:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "thesis",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "Koszticsak-2017-ewt-thesis.pdf",
                "type": "application/pdf",
                "size": 51784684,
                "path": "Publication:Koszticsak-2017-ewt",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2017/Koszticsak-2017-ewt/Koszticsak-2017-ewt-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2017/Koszticsak-2017-ewt/Koszticsak-2017-ewt-thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "deskollage"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2017/Koszticsak-2017-ewt/",
        "__class": "Publication"
    },
    {
        "id": "Groeller_2016_P7",
        "type_id": "journalpaper",
        "tu_id": null,
        "repositum_id": null,
        "title": "Depth functions as a quality measure and for steering multidimensional projections",
        "date": "2016-11",
        "abstract": "The analysis of multidimensional data has been a topic of continuous research for many years.This type of data can be found inseveral different areas ofscience. \nThe analysis of multidimensional data has been a topic of continuous research for many years. This type of data can be found in several different areas of science. A common task while analyzing such data is to investigate patterns by interacting with spatializations of the data in a visual domain. Understanding the relation between the underlying dataset characteristics and the technique used to provide its visual representation is of fundamental importance since it can provide a better intuition on what to expect from the spatialization. In this paper, we propose the usage of concepts from non-parametric statistics, namely depth functions, as a quality measure for spatializations. We evaluate the action of multi-dimensional projection techniques on such estimates. We apply both qualitative and quantitative ana-lyses on four different multidimensional techniques selected according to the properties they aim to preserve. We evaluate them with datasets of different characteristics: synthetic, real world, high dimensional; and contaminated with outliers. As a straightforward application, we propose to use depth information to guide multidimensional projection techniques which rely on interaction through control point selection and positioning. Even for techniques which do not intend to preserve any centrality measure, interesting results can be achieved by separating regions possibly contaminated with outliers.\n",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": true,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 453,
            "image_height": 362,
            "name": "Groeller_2016_P7-image.PNG",
            "type": "image/png",
            "size": 28144,
            "path": "Publication:Groeller_2016_P7",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2016/Groeller_2016_P7/Groeller_2016_P7-image.PNG",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2016/Groeller_2016_P7/Groeller_2016_P7-image:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1408,
            1195,
            1413,
            166,
            1414,
            1415
        ],
        "issn": "doi: 10.1016/j.cag.2016.08.008",
        "journal": "Computers & Graphics (Special Section on SIBGRAPI 2016)",
        "lecturer": [
            1416
        ],
        "pages_from": "93",
        "pages_to": "106",
        "volume": "60",
        "research_areas": [
            "Perception"
        ],
        "keywords": [],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "image",
                "main_file": true,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 453,
                "image_height": 362,
                "name": "Groeller_2016_P7-image.PNG",
                "type": "image/png",
                "size": 28144,
                "path": "Publication:Groeller_2016_P7",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2016/Groeller_2016_P7/Groeller_2016_P7-image.PNG",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2016/Groeller_2016_P7/Groeller_2016_P7-image:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "Paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "Groeller_2016_P7-Paper.pdf",
                "type": "application/pdf",
                "size": 2203763,
                "path": "Publication:Groeller_2016_P7",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2016/Groeller_2016_P7/Groeller_2016_P7-Paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2016/Groeller_2016_P7/Groeller_2016_P7-Paper:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2016/Groeller_2016_P7/",
        "__class": "Publication"
    },
    {
        "id": "Reichinger-2016-spaghetti",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": null,
        "title": "Spaghetti, Sink and Sarcophagus: Design Explorations of Tactile Artworks for Visually Impaired People",
        "date": "2016-10",
        "abstract": null,
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            879,
            190
        ],
        "booktitle": "Proceedings of the 9th Nordic Conference on CHI 2016",
        "date_from": "2016",
        "event": "9th Nordic Conference on CHI 2016",
        "lecturer": [
            879
        ],
        "research_areas": [
            "Fabrication",
            "Perception"
        ],
        "keywords": [],
        "weblinks": [],
        "files": [],
        "projects_workgroups": [
            "VRVis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2016/Reichinger-2016-spaghetti/",
        "__class": "Publication"
    },
    {
        "id": "bernhard-2016-gft",
        "type_id": "journalpaper_notalk",
        "tu_id": null,
        "repositum_id": null,
        "title": " The Accuracy of Gauge-Figure Tasks in Monoscopic and Stereo Displays",
        "date": "2016-07",
        "abstract": "The gauge-figure task (GFT) is a widespread method used to study surface perception for evaluating rendering and visualization techniques. The authors investigate how accurately slant angles probed on well-defined objects align with the ground truth (GT) in monoscopic and stereoscopic displays. Their results show that the GFT probes taken with well-defined objects align well with the GT in the all-monoscopic and all-stereoscopic conditions. However, they found that a GF rendered in stereo over a monoscopic stimulus results in a strong slant underestimation and that an overestimation occurred in the inverse case (monoscopic GF andstereoscopic stimulus). They discuss how their findings affect the interpretation of absolute GFT measures, compared to the GT normal.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 216,
            "image_height": 205,
            "name": "bernhard-2016-gft-.jpg",
            "type": "image/jpeg",
            "size": 38475,
            "path": "Publication:bernhard-2016-gft",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2016/bernhard-2016-gft/bernhard-2016-gft-.jpg",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2016/bernhard-2016-gft/bernhard-2016-gft-:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            660,
            1110,
            1293,
            896,
            171
        ],
        "journal": "IEEE Computer Graphics and Applications",
        "number": "4",
        "pages_from": "56",
        "pages_to": "66",
        "volume": "36",
        "research_areas": [
            "Perception"
        ],
        "keywords": [
            "computer graphics",
            "gauge-figure task",
            "perceptual visualization",
            "shape perception"
        ],
        "weblinks": [
            {
                "href": "http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7478440",
                "caption": "IEEE Xplore",
                "description": null,
                "main_file": 0
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 216,
                "image_height": 205,
                "name": "bernhard-2016-gft-.jpg",
                "type": "image/jpeg",
                "size": 38475,
                "path": "Publication:bernhard-2016-gft",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2016/bernhard-2016-gft/bernhard-2016-gft-.jpg",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2016/bernhard-2016-gft/bernhard-2016-gft-:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "illvisation"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2016/bernhard-2016-gft/",
        "__class": "Publication"
    },
    {
        "id": "viola-evr",
        "type_id": "habilthesis",
        "tu_id": null,
        "repositum_id": null,
        "title": "Effective Visual Representations",
        "date": "2016",
        "abstract": null,
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 728,
            "image_height": 612,
            "name": "viola-evr-image.jpg",
            "type": "image/jpeg",
            "size": 43401,
            "path": "Publication:viola-evr",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2016/viola-evr/viola-evr-image.jpg",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2016/viola-evr/viola-evr-image:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            171
        ],
        "research_areas": [
            "IllVis",
            "Perception"
        ],
        "keywords": [],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "image",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 728,
                "image_height": 612,
                "name": "viola-evr-image.jpg",
                "type": "image/jpeg",
                "size": 43401,
                "path": "Publication:viola-evr",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2016/viola-evr/viola-evr-image.jpg",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2016/viola-evr/viola-evr-image:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "thesis",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "viola-evr-thesis.pdf",
                "type": "application/pdf",
                "size": 35963729,
                "path": "Publication:viola-evr",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2016/viola-evr/viola-evr-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2016/viola-evr/viola-evr-thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "illvisation"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2016/viola-evr/",
        "__class": "Publication"
    },
    {
        "id": "Waldin_Nicholas_2016_Colormaps",
        "type_id": "journalpaper_notalk",
        "tu_id": null,
        "repositum_id": null,
        "title": "Personalized 2D color maps",
        "date": "2016",
        "abstract": "2D color maps are often used to visually encode complex data characteristics such as heat or height. The comprehension of color maps in visualization is affected by the display (e.g., a monitor) and the perceptual abilities of the viewer. In this paper we present a novel method to measure a user׳s ability to distinguish colors of a two-dimensional color map on a given monitor. We show how to adapt the color map to the user and display to optimally compensate for the measured deficiencies. Furthermore, we improve user acceptance of the calibration procedure by transforming the calibration into a game. The user has to sort colors along a line in a 3D color space in a competitive fashion. The errors the user makes in sorting these lines are used to adapt the color map to his perceptual capabilities.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1461,
            660,
            171
        ],
        "date_from": "2016-10",
        "issn": "0097-8493",
        "journal": "Computers & Graphics",
        "pages_from": "143",
        "pages_to": "150",
        "volume": "59",
        "research_areas": [
            "IllVis",
            "Perception"
        ],
        "keywords": [
            "Color; Perception",
            "Perception",
            "Color vision deficiency"
        ],
        "weblinks": [
            {
                "href": "http://www.sciencedirect.com/science/article/pii/S0097849316300772",
                "caption": null,
                "description": null,
                "main_file": 0
            }
        ],
        "files": [],
        "projects_workgroups": [],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2016/Waldin_Nicholas_2016_Colormaps/",
        "__class": "Publication"
    },
    {
        "id": "Waldin_Nicholas_2016_Individualization",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": null,
        "title": "Individualization of 2D Color Maps for People with Color Vision Deficiencies",
        "date": "2016",
        "abstract": null,
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "",
            "main_file": true,
            "use_in_gallery": false,
            "access": "public",
            "name": "Waldin_Nicholas_2016_Individualization-.pdf",
            "type": "application/pdf",
            "size": 3812707,
            "path": "Publication:Waldin_Nicholas_2016_Individualization",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2016/Waldin_Nicholas_2016_Individualization/Waldin_Nicholas_2016_Individualization-.pdf",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2016/Waldin_Nicholas_2016_Individualization/Waldin_Nicholas_2016_Individualization-:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1461,
            660,
            488,
            171
        ],
        "booktitle": "Proceedings of the 32Nd Spring Conference on Computer Graphics",
        "date_from": "2016",
        "date_to": "2016",
        "lecturer": [
            1339
        ],
        "location": "Slomenice, Slovakia",
        "research_areas": [
            "Perception"
        ],
        "keywords": [],
        "weblinks": [
            {
                "href": "http://dl.acm.org/citation.cfm?id=2948643",
                "caption": null,
                "description": null,
                "main_file": 0
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "Waldin_Nicholas_2016_Individualization-.pdf",
                "type": "application/pdf",
                "size": 3812707,
                "path": "Publication:Waldin_Nicholas_2016_Individualization",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2016/Waldin_Nicholas_2016_Individualization/Waldin_Nicholas_2016_Individualization-.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2016/Waldin_Nicholas_2016_Individualization/Waldin_Nicholas_2016_Individualization-:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2016/Waldin_Nicholas_2016_Individualization/",
        "__class": "Publication"
    },
    {
        "id": "waldner-2014-af",
        "type_id": "journalpaper",
        "tu_id": null,
        "repositum_id": null,
        "title": " Attractive Flicker: Guiding Attention in Dynamic Narrative Visualizations",
        "date": "2014-12",
        "abstract": "Focus+context techniques provide visual guidance in visualizations by giving strong visual prominence to elements of interest while the context is suppressed. However, finding a visual feature to enhance for the focus to pop out from its context in a large dynamic scene, while leading to minimal visual deformation and subjective disturbance, is challenging. This paper proposes Attractive Flicker, a novel technique for visual guidance in dynamic narrative visualizations. We first show that flicker is a strong visual attractor in the entire visual field, without distorting, suppressing, or adding any scene elements. The novel aspect of our Attractive Flicker technique is that it consists of two signal stages: The first “orientation stage” is a short but intensive flicker stimulus to attract the attention to elements of interest. Subsequently, the intensive flicker is reduced to a minimally disturbing luminance oscillation (“engagement stage”) as visual support to keep track of the focus elements. To find a good trade-off between attraction effectiveness and subjective annoyance caused by flicker, we conducted two perceptual studies to find suitable signal parameters. We showcase Attractive Flicker with the parameters obtained from the perceptual statistics in a study of molecular interactions. With Attractive Flicker, users were able to easily follow the narrative of the visualization on a large display, while the flickering of focus elements was not disturbing when observing the context.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 600,
            "image_height": 605,
            "name": "waldner-2014-af-.png",
            "type": "image/png",
            "size": 232715,
            "path": "Publication:waldner-2014-af",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-af/waldner-2014-af-.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-af/waldner-2014-af-:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1110,
            1189,
            660,
            190,
            171
        ],
        "date_from": "2014-11-09",
        "date_to": "2014-11-14",
        "event": "IEEE VIS 2014",
        "journal": "IEEE Transactions on Visualization and Computer Graphics",
        "lecturer": [
            1110
        ],
        "location": "Paris, France",
        "number": "12",
        "pages_from": "2456",
        "pages_to": "2465",
        "volume": "20",
        "research_areas": [
            "BioVis",
            "Perception"
        ],
        "keywords": [
            "Narrative Visualization",
            "Flicker",
            "Visual Attention"
        ],
        "weblinks": [
            {
                "href": "http://dx.doi.org/10.1109/TVCG.2014.2346352",
                "caption": null,
                "description": null,
                "main_file": 0
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 600,
                "image_height": 605,
                "name": "waldner-2014-af-.png",
                "type": "image/png",
                "size": 232715,
                "path": "Publication:waldner-2014-af",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-af/waldner-2014-af-.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-af/waldner-2014-af-:thumb{{size}}.png"
            },
            {
                "description": "Screenshot of the large molecular scene used in the final experiment",
                "filetitle": "molecularScene",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 7597,
                "image_height": 1327,
                "name": "waldner-2014-af-molecularScene.png",
                "type": "image/png",
                "size": 3805356,
                "path": "Publication:waldner-2014-af",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-af/waldner-2014-af-molecularScene.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-af/waldner-2014-af-molecularScene:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "waldner-2014-af-paper.pdf",
                "type": "application/pdf",
                "size": 6413298,
                "path": "Publication:waldner-2014-af",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-af/waldner-2014-af-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-af/waldner-2014-af-paper:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "Preview video",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "preview_image_width": 1280,
                "preview_image_height": 720,
                "name": "waldner-2014-af-Preview video.mp4",
                "type": "video/mp4",
                "size": 16497882,
                "path": "Publication:waldner-2014-af",
                "preview_name": "waldner-2014-af-Preview video:preview.png",
                "preview_type": "image/jpeg",
                "preview_size": 14964,
                "url": "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-af/waldner-2014-af-Preview video.mp4",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-af/waldner-2014-af-Preview video:thumb{{size}}.png",
                "video_mp4": "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-af/waldner-2014-af-Preview video:video.mp4"
            },
            {
                "description": null,
                "filetitle": "Submission video",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "preview_image_width": 1280,
                "preview_image_height": 720,
                "name": "waldner-2014-af-Submission video.mp4",
                "type": "video/mp4",
                "size": 23904054,
                "path": "Publication:waldner-2014-af",
                "preview_name": "waldner-2014-af-Submission video:preview.png",
                "preview_type": "image/jpeg",
                "preview_size": 48929,
                "url": "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-af/waldner-2014-af-Submission video.mp4",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-af/waldner-2014-af-Submission video:thumb{{size}}.png",
                "video_mp4": "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-af/waldner-2014-af-Submission video:video.mp4"
            }
        ],
        "projects_workgroups": [
            "illvisation"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-af/",
        "__class": "Publication"
    },
    {
        "id": "birkeland_aasmund_2014_pums",
        "type_id": "journalpaper",
        "tu_id": null,
        "repositum_id": null,
        "title": "Perceptually Uniform Motion Space",
        "date": "2014-11",
        "abstract": "Flow data is often visualized by animated particles inserted into a flow field. The velocity of a particle on the screen is typically linearly scaled by the velocities in the data. However, the perception of velocity magnitude in animated particles is not necessarily linear. We present a study on how different parameters affect relative motion perception. We have investigated the impact of four parameters. The parameters consist of speed multiplier, direction, contrast type and the global velocity scale. In addition, we investigated if multiple motion cues, and point distribution, affect the speed estimation. Several studies were executed to investigate the impact of each parameter. In the initial results, we noticed trends in scale and multiplier. Using the trends for the significant parameters, we designed a compensation model, which adjusts the particle speed to compensate for the effect of the parameters. We then performed a second study to investigate the performance of the compensation model. From the second study we detected a constant estimation error, which we adjusted for in the last study. In addition, we connect our work to established theories in psychophysics by comparing our model to a model based on Stevens’ Power Law.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 663,
            "image_height": 663,
            "name": "birkeland_aasmund_2014_pums-.png",
            "type": "image/png",
            "size": 568432,
            "path": "Publication:birkeland_aasmund_2014_pums",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2014/birkeland_aasmund_2014_pums/birkeland_aasmund_2014_pums-.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2014/birkeland_aasmund_2014_pums/birkeland_aasmund_2014_pums-:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1007,
            1144,
            171
        ],
        "date_from": "2014-11-09",
        "date_to": "2014-11-14",
        "event": "IEEE VIS 2014",
        "issn": "1077-2626",
        "journal": "IEEE Transactions on Visualization and Computer Graphics",
        "lecturer": [
            1007
        ],
        "location": "IEEE VIS 2014",
        "number": "11",
        "pages_from": "1542",
        "pages_to": "1554",
        "volume": "20",
        "research_areas": [
            "Perception"
        ],
        "keywords": [
            "motion visualization",
            "motion perception",
            "animation",
            "evauation" === "evauation" ? "evaluation" : "evaluation",
            "perceptual model"
        ],
        "weblinks": [
            {
                "href": "http://dx.doi.org/10.1109/TVCG.2014.2322363",
                "caption": null,
                "description": null,
                "main_file": 0
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 663,
                "image_height": 663,
                "name": "birkeland_aasmund_2014_pums-.png",
                "type": "image/png",
                "size": 568432,
                "path": "Publication:birkeland_aasmund_2014_pums",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2014/birkeland_aasmund_2014_pums/birkeland_aasmund_2014_pums-.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2014/birkeland_aasmund_2014_pums/birkeland_aasmund_2014_pums-:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "paper",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "birkeland_aasmund_2014_pums-paper.pdf",
                "type": "application/pdf",
                "size": 5605137,
                "path": "Publication:birkeland_aasmund_2014_pums",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2014/birkeland_aasmund_2014_pums/birkeland_aasmund_2014_pums-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2014/birkeland_aasmund_2014_pums/birkeland_aasmund_2014_pums-paper:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "illvisation"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2014/birkeland_aasmund_2014_pums/",
        "__class": "Publication"
    },
    {
        "id": "bernhard-2014-GTOM",
        "type_id": "journalpaper",
        "tu_id": null,
        "repositum_id": null,
        "title": "Gaze-To-Object Mapping During Visual Search in 3D Virtual Environments",
        "date": "2014-08",
        "abstract": "Stimuli obtained from highly dynamic 3D virtual environments and synchronous eye-tracking data are commonly used by algorithms that strive to correlate gaze to scene objects, a process referred to as Gaze-To-Object Mapping (GTOM). We propose to address this problem with a probabilistic approach using Bayesian inference. The desired result of the inference is a predicted probability density function (PDF) specifying for each object in the scene a probability to be attended by the user. To evaluate the quality of a predicted attention PDF, we present a methodology to assess the information value (i.e., likelihood) in the predictions of different approaches that can be used to infer object attention. To this end, we propose an experiment based on a visual search task which allows us to determine the object of attention at a certain point in time under controlled conditions.\nWe perform this experiment with a wide range of static and dynamic visual scenes to obtain a ground-truth evaluation data set, allowing us to assess GTOM techniques in a set of 30 particularly challenging cases.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "thumnail",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 703,
            "image_height": 314,
            "name": "bernhard-2014-GTOM-thumnail.png",
            "type": "image/png",
            "size": 151264,
            "path": "Publication:bernhard-2014-GTOM",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2014/bernhard-2014-GTOM/bernhard-2014-GTOM-thumnail.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2014/bernhard-2014-GTOM/bernhard-2014-GTOM-thumnail:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            660,
            731,
            779,
            193
        ],
        "date_from": "2014-08-08",
        "date_to": "2014-08-09",
        "event": "ACM Symposium on Applied Perception (SAP 2014)",
        "issn": "1544-3558",
        "journal": "ACM Transactions on Applied Perception (Special Issue SAP 2014)",
        "lecturer": [
            660
        ],
        "location": "Vancouver, Canada",
        "number": "3",
        "pages_from": "14:1",
        "pages_to": "14:17",
        "volume": "11",
        "research_areas": [
            "Perception",
            "Rendering"
        ],
        "keywords": [
            "object-based attention",
            "eye-tracking",
            "virtual environments",
            "visual attention"
        ],
        "weblinks": [
            {
                "href": "http://dl.acm.org/citation.cfm?doid=2663596.2644812",
                "caption": null,
                "description": null,
                "main_file": 0
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "draft",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "bernhard-2014-GTOM-draft.pdf",
                "type": "application/pdf",
                "size": 9514669,
                "path": "Publication:bernhard-2014-GTOM",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2014/bernhard-2014-GTOM/bernhard-2014-GTOM-draft.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2014/bernhard-2014-GTOM/bernhard-2014-GTOM-draft:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "thumnail",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 703,
                "image_height": 314,
                "name": "bernhard-2014-GTOM-thumnail.png",
                "type": "image/png",
                "size": 151264,
                "path": "Publication:bernhard-2014-GTOM",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2014/bernhard-2014-GTOM/bernhard-2014-GTOM-thumnail.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2014/bernhard-2014-GTOM/bernhard-2014-GTOM-thumnail:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "video",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "preview_image_width": 1024,
                "preview_image_height": 768,
                "name": "bernhard-2014-GTOM-video.mp4",
                "type": "video/mp4",
                "size": 29837759,
                "path": "Publication:bernhard-2014-GTOM",
                "preview_name": "bernhard-2014-GTOM-video:preview.png",
                "preview_type": "image/jpeg",
                "preview_size": 12852,
                "url": "https://www.cg.tuwien.ac.at/research/publications/2014/bernhard-2014-GTOM/bernhard-2014-GTOM-video.mp4",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2014/bernhard-2014-GTOM/bernhard-2014-GTOM-video:thumb{{size}}.png",
                "video_mp4": "https://www.cg.tuwien.ac.at/research/publications/2014/bernhard-2014-GTOM/bernhard-2014-GTOM-video:video.mp4"
            }
        ],
        "projects_workgroups": [
            "Mofa",
            "illvisation"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2014/bernhard-2014-GTOM/",
        "__class": "Publication"
    },
    {
        "id": "hecher-2014-MH",
        "type_id": "journalpaper_notalk",
        "tu_id": null,
        "repositum_id": null,
        "title": "A Comparative Perceptual Study of Soft Shadow Algorithms",
        "date": "2014-06",
        "abstract": "We performed a perceptual user study of algorithms that approximate soft shadows in real time. Although a huge body of soft-shadow algorithms have been proposed, to our knowledge this is the first methodical study for comparing different real-time shadow algorithms with respect to their plausibility and visual appearance. We evaluated soft-shadow properties like penumbra overlap with respect to their relevance to shadow perception in a systematic way, and we believe that our results can be useful to guide future shadow approaches in their methods of evaluation. In this study, we also capture the predominant case of an inexperienced user observing shadows without comparing to a reference solution, such as when watching a movie or playing a game. One important result of this experiment is to scientifically verify that real-time soft-shadow algorithms, despite having become physically based and very realistic, can nevertheless be intuitively distinguished from a correct solution by untrained users.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 978,
            "image_height": 850,
            "name": "hecher-2014-MH-image.png",
            "type": "image/png",
            "size": 245534,
            "path": "Publication:hecher-2014-MH",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2014/hecher-2014-MH/hecher-2014-MH-image.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2014/hecher-2014-MH/hecher-2014-MH-image:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            779,
            660,
            326,
            452,
            193
        ],
        "issn": "1544-3558",
        "journal": "ACM Transactions on Applied Perception",
        "number": "5",
        "pages_from": "5:1",
        "pages_to": "5:21",
        "volume": "11",
        "research_areas": [
            "Perception",
            "Rendering"
        ],
        "keywords": [
            "Perception Studies",
            "Soft Shadows"
        ],
        "weblinks": [
            {
                "href": "http://doi.acm.org/10.1145/2620029",
                "caption": null,
                "description": null,
                "main_file": 0
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "draft",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "hecher-2014-MH-draft.pdf",
                "type": "application/pdf",
                "size": 7731118,
                "path": "Publication:hecher-2014-MH",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2014/hecher-2014-MH/hecher-2014-MH-draft.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2014/hecher-2014-MH/hecher-2014-MH-draft:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "image",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 978,
                "image_height": 850,
                "name": "hecher-2014-MH-image.png",
                "type": "image/png",
                "size": 245534,
                "path": "Publication:hecher-2014-MH",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2014/hecher-2014-MH/hecher-2014-MH-image.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2014/hecher-2014-MH/hecher-2014-MH-image:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "Mofa"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2014/hecher-2014-MH/",
        "__class": "Publication"
    },
    {
        "id": "bernhard-2014-EFD",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": null,
        "title": "The Effects of Fast Disparity Adjustments in Gaze-Controlled Stereoscopic Applications",
        "date": "2014-03",
        "abstract": "With the emergence of affordable 3D displays, stereoscopy is becoming\na commodity. However, often users report discomfort even after brief exposures to stereo content. One of the main reasons is the conflict between vergence and accommodation that is caused by 3D displays. We investigate dynamic adjustment of stereo parameters\nin a scene using gaze data in order to reduce discomfort. In a user study, we measured stereo fusion times after abrupt manipulation of disparities using gaze data. We found that gaze-controlled manipulation of disparities can lower fusion times for large disparities. In addition we found that gaze-controlled disparity adjustment should be applied in a personalized manner and ideally performed only at the extremities or outside the comfort zone of subjects.\nThese results provide important insight on the problems associated with fast disparity manipulation and are essential for developing appealing gaze-contingent and gaze-controlled applications.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": "teaser",
            "filetitle": "image",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 1216,
            "image_height": 332,
            "name": "bernhard-2014-EFD-image.png",
            "type": "image/png",
            "size": 160276,
            "path": "Publication:bernhard-2014-EFD",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2014/bernhard-2014-EFD/bernhard-2014-EFD-image.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2014/bernhard-2014-EFD/bernhard-2014-EFD-image:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            660,
            1017,
            779,
            731,
            193
        ],
        "booktitle": "Proceedings of the Symposium on Eye Tracking Research and Applications (ETRA 2014)",
        "date_from": "2014-03-26",
        "date_to": "2014-03-28",
        "editor": "Pernilla Qvarfordt and Dan Witzner Hansen",
        "isbn": "978-1-4503-2751-0",
        "lecturer": [
            660
        ],
        "location": "Safety Harbor, FL, USA",
        "pages_from": "111",
        "pages_to": "118",
        "publisher": "ACM",
        "research_areas": [
            "Perception"
        ],
        "keywords": [
            "stereoscopic rendering",
            "comfort models",
            "fusion time",
            "eye tracking"
        ],
        "weblinks": [],
        "files": [
            {
                "description": "conference paper",
                "filetitle": "draft",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "bernhard-2014-EFD-draft.pdf",
                "type": "application/pdf",
                "size": 4234907,
                "path": "Publication:bernhard-2014-EFD",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2014/bernhard-2014-EFD/bernhard-2014-EFD-draft.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2014/bernhard-2014-EFD/bernhard-2014-EFD-draft:thumb{{size}}.png"
            },
            {
                "description": "teaser",
                "filetitle": "image",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 1216,
                "image_height": 332,
                "name": "bernhard-2014-EFD-image.png",
                "type": "image/png",
                "size": 160276,
                "path": "Publication:bernhard-2014-EFD",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2014/bernhard-2014-EFD/bernhard-2014-EFD-image.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2014/bernhard-2014-EFD/bernhard-2014-EFD-image:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "PAMINA"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2014/bernhard-2014-EFD/",
        "__class": "Publication"
    },
    {
        "id": "waldner-2013-facetCloudsGI",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": null,
        "title": "FacetClouds: Exploring Tag Clouds for Multi-Dimensional Data",
        "date": "2013-05",
        "abstract": "Tag clouds are simple yet very widespread representations of how often certain words appear in a collection. In conventional tag clouds, only a single visual text variable is actively controlled: the tags’ font size. Previous work has demonstrated that font size is indeed the most influential visual text variable. However, there are other variables, such as text color, font style and tag orientation, that could be manipulated to encode additional data dimensions.\n\nFacetClouds manipulate intrinsic visual text variables to encode multiple data dimensions within a single tag cloud. We conducted a series of experiments to detect the most appropriate visual text variables for encoding nominal and ordinal values in a cloud with tags of varying font size. Results show that color is the most expressive variable for both data types, and that a combination of tag rotation and background color range leads to the best overall performance when showing multiple data dimensions in a single tag cloud.\n",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 362,
            "image_height": 360,
            "name": "waldner-2013-facetCloudsGI-image.png",
            "type": "image/png",
            "size": 49904,
            "path": "Publication:waldner-2013-facetCloudsGI",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2013/waldner-2013-facetCloudsGI/waldner-2013-facetCloudsGI-image.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2013/waldner-2013-facetCloudsGI/waldner-2013-facetCloudsGI-image:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1110,
            1111,
            1112,
            1113,
            1114,
            1115
        ],
        "address": "Regina, Saskatchewan, Canada",
        "booktitle": "Proceedings of the 2013 Graphics Interface Conference",
        "date_from": "2013-05-29",
        "date_to": "2013-05-31",
        "isbn": "978-1-4822-1680-6",
        "lecturer": [
            1111
        ],
        "location": "Regina, Saskatchewan, Canada",
        "organization": "ACM Siggraph",
        "pages_from": "17",
        "pages_to": "24",
        "publisher": "ACM Publishing House",
        "research_areas": [
            "InfoVis",
            "Perception"
        ],
        "keywords": [],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "image",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 362,
                "image_height": 360,
                "name": "waldner-2013-facetCloudsGI-image.png",
                "type": "image/png",
                "size": 49904,
                "path": "Publication:waldner-2013-facetCloudsGI",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2013/waldner-2013-facetCloudsGI/waldner-2013-facetCloudsGI-image.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2013/waldner-2013-facetCloudsGI/waldner-2013-facetCloudsGI-image:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "preview_image_width": 636,
                "preview_image_height": 708,
                "name": "waldner-2013-facetCloudsGI-paper.pdf",
                "type": "application/pdf",
                "size": 2184510,
                "path": "Publication:waldner-2013-facetCloudsGI",
                "preview_name": "waldner-2013-facetCloudsGI-paper:preview.png",
                "preview_type": "image/png",
                "preview_size": 189512,
                "url": "https://www.cg.tuwien.ac.at/research/publications/2013/waldner-2013-facetCloudsGI/waldner-2013-facetCloudsGI-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2013/waldner-2013-facetCloudsGI/waldner-2013-facetCloudsGI-paper:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "illvisation"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2013/waldner-2013-facetCloudsGI/",
        "__class": "Publication"
    },
    {
        "id": "sundstedt-2013-vag",
        "type_id": "inbook",
        "tu_id": null,
        "repositum_id": null,
        "title": "Visual Attention and Gaze Behaviour in Games: An Object-Based Approach",
        "date": "2013-04",
        "abstract": "This chapter presents state-of-the-art methods that tap the potential of psychophysics for the purpose of understanding game players' behavior. Studying gaze behavior in gaming environments has recently gained momentum as it affords a better understanding of gamers' visual attention. However, while knowing where users are attending in a computer game would be useful at a basic level, it does not provide insight into what users are interested in, or why. An answer to these questions can be tremendously useful to game designers, enabling them to improve gameplay, selectively increase visual fidelity, and optimize the distribution of computing resources. Furthermore, this could be useful in verifying game mechanics, improving game AI and smart positioning of advertisements within games, all being applications widely desirable across the games industry. Techniques are outlined to collect gaze data, and map fixation points back to semantic objects in a gaming environment, enabling a deeper understanding of how players interact with games. ",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 958,
            "image_height": 338,
            "name": "sundstedt-2013-vag-image.png",
            "type": "image/png",
            "size": 82333,
            "path": "Publication:sundstedt-2013-vag",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2013/sundstedt-2013-vag/sundstedt-2013-vag-image.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2013/sundstedt-2013-vag/sundstedt-2013-vag-image:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            730,
            660,
            731,
            732,
            193
        ],
        "booktitle": "Game Analytics: Maximizing the Value of Player Data",
        "editor": "M. Seif El-Nasr, A. Drachen, A. Canossa, K. Isbister",
        "isbn": "9781447147688",
        "pages_from": "543",
        "pages_to": "583",
        "publisher": "Springer",
        "research_areas": [
            "Perception"
        ],
        "keywords": [
            "Eye Tracking",
            "Visual Attention",
            "Computer Games"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "image",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 958,
                "image_height": 338,
                "name": "sundstedt-2013-vag-image.png",
                "type": "image/png",
                "size": 82333,
                "path": "Publication:sundstedt-2013-vag",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2013/sundstedt-2013-vag/sundstedt-2013-vag-image.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2013/sundstedt-2013-vag/sundstedt-2013-vag-image:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "Paper",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "sundstedt-2013-vag-Paper.pdf",
                "type": "application/pdf",
                "size": 1343680,
                "path": "Publication:sundstedt-2013-vag",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2013/sundstedt-2013-vag/sundstedt-2013-vag-Paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2013/sundstedt-2013-vag/sundstedt-2013-vag-Paper:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "GPV"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2013/sundstedt-2013-vag/",
        "__class": "Publication"
    },
    {
        "id": "hecher-2012-MH",
        "type_id": "masterthesis",
        "tu_id": null,
        "repositum_id": null,
        "title": "A Comparative Perceptual Study of Soft Shadow Algorithms",
        "date": "2012-10",
        "abstract": "While a huge body of soft shadow algorithms has been proposed, there has been no methodical study for comparing different real-time shadowing algorithms with respect to their plausibility and visual appearance. Therefore, a study was designed to identify and evaluate scene properties with respect to their relevance to shadow quality perception. Since there are so many factors that might influence perception of soft shadows (e.g., complexity of objects, movement, and textures), the study was designed and executed in a way on which future work can build. The evaluation concept not only captures the predominant case of an untrained user experiencing shadows without comparing them to a reference solution, but also the cases of trained and experienced users. We achieve this by reusing the knowledge users gain during the study. Moreover, we thought that the common approach of a two-option forced-choice study can be frustrating for participants when both choices are so similar that people think they are the same. To tackle this problem, a neutral option was provided. For time-consuming studies, where frustrated participants tend to make arbitrary choices, this is a useful concept. Speaking with participants after the study and evaluating the results supports our choice for a third option. The results are helpful to guide the design of future shadow algorithms and allow researchers to evaluate algorithms more effectively. They also allow developers to make better performance versus quality decisions for their applications. One important result of this study is that we can scientifically verify that, without comparison to a reference solution, the human perception is relatively indifferent to a correct soft shadow. Hence, a simple but robust soft shadow algorithm is the better choice in real-world situations. Another finding is that approximating contact hardening in soft shadows is sufficient for the average user and not significantly worse for experts.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": "The block design of our study.",
            "filetitle": "Image",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 2689,
            "image_height": 1489,
            "name": "hecher-2012-MH-Image.jpg",
            "type": "image/jpeg",
            "size": 262303,
            "path": "Publication:hecher-2012-MH",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2012/hecher-2012-MH/hecher-2012-MH-Image.jpg",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2012/hecher-2012-MH/hecher-2012-MH-Image:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            779
        ],
        "date_start": "2012-01-01",
        "diploma_examina": "2012-10-09",
        "matrikelnr": "0625134",
        "supervisor": [
            193
        ],
        "research_areas": [
            "Perception",
            "Rendering"
        ],
        "keywords": [
            "Perception Studies",
            "Soft Shadows"
        ],
        "weblinks": [],
        "files": [
            {
                "description": "The block design of our study.",
                "filetitle": "Image",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 2689,
                "image_height": 1489,
                "name": "hecher-2012-MH-Image.jpg",
                "type": "image/jpeg",
                "size": 262303,
                "path": "Publication:hecher-2012-MH",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2012/hecher-2012-MH/hecher-2012-MH-Image.jpg",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2012/hecher-2012-MH/hecher-2012-MH-Image:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "poster",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "preview_image_width": 180,
                "preview_image_height": 256,
                "name": "hecher-2012-MH-poster.pdf",
                "type": "application/pdf",
                "size": 3821052,
                "path": "Publication:hecher-2012-MH",
                "preview_name": "hecher-2012-MH-poster:preview.png",
                "preview_type": "image/png",
                "preview_size": 71741,
                "url": "https://www.cg.tuwien.ac.at/research/publications/2012/hecher-2012-MH/hecher-2012-MH-poster.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2012/hecher-2012-MH/hecher-2012-MH-poster:thumb{{size}}.png"
            },
            {
                "description": "A Comparative Perceptual Study of Soft Shadow Algorithms.pdf",
                "filetitle": "thesis",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "preview_image_width": 2689,
                "preview_image_height": 1489,
                "name": "hecher-2012-MH-thesis.pdf",
                "type": "application/pdf",
                "size": 6912872,
                "path": "Publication:hecher-2012-MH",
                "preview_name": "hecher-2012-MH-thesis:preview.png",
                "preview_type": "image/jpeg",
                "preview_size": 262303,
                "url": "https://www.cg.tuwien.ac.at/research/publications/2012/hecher-2012-MH/hecher-2012-MH-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2012/hecher-2012-MH/hecher-2012-MH-thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "rend"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2012/hecher-2012-MH/",
        "__class": "Publication"
    },
    {
        "id": "bernhard-2011-maicg",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": null,
        "title": "Manipulating Attention in Computer Games",
        "date": "2011-06",
        "abstract": "In computer games, a user’s attention is focused on the current task, and task-irrelevant details remain unnoticed. This behavior, known as inattentional blindness, is a main problem for the optimal placement of information or advertisements. We propose a guiding principle based on Wolfe’s theory of Guided Search, which predicts the saliency of objects during a visual search task. Assuming that computer games elicit visual search tasks frequently, we applied this model in a “reverse” direction: Given a target item (e.g., advertisement) which should be noticed by the user, we choose a frequently searched game item and modify it so that it shares some perceptual features (e.g., color or orientation) with the target item. A memory experiment with 36 participants showed that in an action video game, advertisements were more noticeable to users when this method is applied.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            660,
            839,
            193
        ],
        "booktitle": "Proceedings of the IEEE IVMSP Workshop on Perception and Visual Signal Analysis",
        "date_from": "2011-06-16",
        "date_to": "2011-06-17",
        "isbn": "9781457712852",
        "lecturer": [
            660
        ],
        "location": "Ithaca, NY",
        "pages_from": "153",
        "pages_to": "158",
        "publisher": "IEEE",
        "research_areas": [
            "Perception"
        ],
        "keywords": [
            "saliency",
            "attention guidance",
            "inattentional blindness",
            "in-game advertising",
            "guided search"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "bernhard-2011-maicg-paper.pdf",
                "type": "application/pdf",
                "size": 2337253,
                "path": "Publication:bernhard-2011-maicg",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2011/bernhard-2011-maicg/bernhard-2011-maicg-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2011/bernhard-2011-maicg/bernhard-2011-maicg-paper:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "presentation",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "bernhard-2011-maicg-presentation.ppt",
                "type": "application/vnd.ms-office",
                "size": 12238336,
                "path": "Publication:bernhard-2011-maicg",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2011/bernhard-2011-maicg/bernhard-2011-maicg-presentation.ppt",
                "thumb_image_sizes": []
            }
        ],
        "projects_workgroups": [
            "GPV"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2011/bernhard-2011-maicg/",
        "__class": "Publication"
    }
]
