[
    {
        "id": "VRVis",
        "workgroup_id": "External",
        "drupal_id": 4155,
        "drupal_path": "/research/projects/VRVis",
        "name": "VRVis Competence Center",
        "name_de": null,
        "short_title": "VRVis",
        "website": null,
        "status": "active",
        "short_abstract": "<p>The <a href=\"http://www.vrvis.at/\">VRVis K1 Research Center</a> is the leading application oriented research center in the area of virtual reality (VR) and visualization (Vis) in Austria and is internationally recognized. You can find extensive Information about the VRVis-Center <a href=\"http://www.vrvis.at/about\">here</a></p>\r\n",
        "abstract": "<p>The <a href=\"http://www.vrvis.at/\">VRVis K1 Research Center</a> is the leading application oriented research center in the area of virtual reality (VR) and visualization (Vis) in Austria and is internationally recognized. You can find extensive Information about the VRVis-Center <a href=\"http://www.vrvis.at/about\">here</a></p>\r\n",
        "start_date": "2000-01-01",
        "end_date": null,
        "leader_id": 190,
        "logo": {
            "name": "logo.png",
            "path": "project:VRVis",
            "type": "image/png",
            "size": 3275,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/VRVis/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [
            {
                "id": "K1",
                "contract_number": null,
                "comment": null
            }
        ],
        "research_areas": [],
        "publications": [
            "Ahmed-2008-EGVE",
            "Bauer_Dominik_2018_VR",
            "Berger_2011_UAE",
            "brunnhuber-2010-behavior-handicapped",
            "Cai_2018",
            "CORNEL-2014-AFRS",
            "CORNEL-2017-FRS",
            "CORNEL-2019-IVF",
            "cornel-2022-wih",
            "donabauer_2019_1",
            "Eibner-12",
            "ERLER-2017-HVR",
            "Freiler-2008-Set",
            "Gan-2011-ICU",
            "Ganuza",
            "Ganuza-2017-IVCSGM",
            "ilcik-2008-sgfa",
            "Kathi-2018-VRB",
            "kavelar_albert_2010_ebic",
            "Kellner-2021-DA",
            "kim_2009_iPhone",
            "Kim-2008_FWIP",
            "KOEPPEL-2025-Nearfield",
            "Konev-FCV2018",
            "konyha_2006_int",
            "Konyha_2007_SCV",
            "Konyha_2009_survey",
            "kroesl-2017-LiteMaker",
            "kroesl-2018-DC",
            "kroesl-2018-TVS",
            "kroesl-2019-ICthroughVR",
            "Laramee-2004-thesis",
            "ludwig-2012-MT",
            "luksch_2020",
            "LUKSCH-2014-RTR",
            "LUKSCH-2019-IGI",
            "Mantler-2011-GEAR",
            "matkovic_2009_flickr",
            "matkovic-2007-clv",
            "Matkovic-2008-Bergen",
            "Matkovic-2008-ComVis",
            "Matkovic-2008-med",
            "Matkovic-2008-Steer",
            "matkovic-2010-eventlineview",
            "Matkovic-2014-ieee",
            "May_Michael_2015_DIS",
            "MEINDL-2015-OSR",
            "Miklin-2009-Mig",
            "Muehlbacher_diss_2018",
            "musialski-2009-sbfr",
            "musialski-2010-imv",
            "musialski-2010-pfi",
            "musialski-2010-tof",
            "north-2025-aog",
            "ortner-2016-tunnel",
            "ortner-2016-visaware",
            "PB-VRVis-2018-005",
            "Pfahler-2016-MT",
            "PROST-2017-RTL",
            "PROST-2019-RTPAL",
            "Przemyslaw-2010-ADS",
            "Purg2015-b",
            "Purg2015-c",
            "Purg2015-China",
            "Purg2015-d",
            "purgathofer_2016I1",
            "Purgathofer-2013-cvws",
            "Purgathofer-2014-ETH",
            "Purgathofer-2014-LSM",
            "Purgathofer-2014-PanEu",
            "Purgathofer-2014-Rio",
            "Purgathofer-2015-WA",
            "Purgathofer-2017-China1",
            "Purgathofer-2017-China2",
            "Purgathofer-2017-VC-Interface",
            "Purgathofer-2018-EG",
            "Rainer_2017",
            "Reichinger_2016",
            "Reichinger_Fuhrmann_2016",
            "Reichinger-2011-Tac",
            "Reichinger-2016-spaghetti",
            "Reichinger-2018-TAC",
            "reisner-2010-1dh",
            "reisner-2010-ind",
            "reisner-2011-akgeo",
            "reisner-2011-comdm",
            "reisner-2012-iwssip",
            "SCHOBER-2006-IVSA",
            "schuetz-2019-CLOD",
            "schwaerzler_2018_phd",
            "SCHWAERZLER-2017-SBGM",
            "Shi_2007_PathLines",
            "Singh-2008-Metric",
            "sorger-2013-neuromap",
            "sorger-2015-litevis",
            "sorger-2015-taxintec",
            "sorger-2017-thesis",
            "Spechtenhauser_Florian_2016",
            "STEINLECHNER-2019-APS",
            "STEINLECHNER-2019-ICT",
            "STREICHER-2022-DSLS",
            "Szabo-2018-DA",
            "Trobec-2008-Heart",
            "Walch-2017-DA",
            "Wimmer_Maria_2015_SAS",
            "Zusag-2017-Bach"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/VRVis",
        "__class": "Project"
    },
    {
        "id": "xmas",
        "workgroup_id": "abteilung",
        "drupal_id": 4153,
        "drupal_path": "/research/projects/xmas",
        "name": "X-Mas Cards",
        "name_de": null,
        "short_title": "xmas",
        "website": null,
        "status": "active",
        "short_abstract": "Every year a christmas card showing aspects of our research projects is produced and sent out.",
        "abstract": "<p>Every year a christmas card showing aspects of our research projects is produced and sent out.</p>\r\n",
        "start_date": "1993-12-01",
        "end_date": null,
        "leader_id": 190,
        "logo": {
            "name": "logo.png",
            "path": "project:xmas",
            "type": "image/png",
            "size": 315425,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/xmas/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [],
        "research_areas": [],
        "publications": [
            "dvdkouril_xmas_card_2018",
            "freude-2015-xmc",
            "Habel_07_xms",
            "ilcik-2021-xmas",
            "jeschke-2011-xMasCard",
            "kroesl_x_card_2017",
            "mindek-xmas-card-2014",
            "mindek-xmas-card-2016",
            "ohrhallinger_stefan-2019-xms",
            "Rau08",
            "SCHEIBLAUER-2009-xmas",
            "x-mas-2013",
            "xmas-1993",
            "xmas-1994",
            "xmas-1995",
            "xmas-1996",
            "xmas-1997",
            "xmas-1998",
            "xmas-1999",
            "xmas-2000",
            "xmas-2001",
            "xmas-2002",
            "xmas-2003",
            "xmas-2004",
            "xmas-2005",
            "xmas-2006",
            "xmas-2010",
            "xmas-2012",
            "xmas-2022",
            "xmas-2023",
            "xmas-2024",
            "xmastree2002",
            "xmastree2005"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/xmas",
        "__class": "Project"
    },
    {
        "id": "d10381",
        "workgroup_id": "rend",
        "drupal_id": 10381,
        "drupal_path": "/research/projects/Palaces-Pixels",
        "name": "Palaces to Pixels: Advanced 3D Reconstruction for Heritage",
        "name_de": "Paläste zu Pixeln: 3D-Rekonstruktion für das Kulturerbe",
        "short_title": "Palaces to Pixels",
        "website": null,
        "status": "active",
        "short_abstract": null,
        "abstract": null,
        "start_date": "2026-05-01",
        "end_date": "2030-04-30",
        "leader_id": 948,
        "logo": null,
        "funding_organisations": [],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/Palaces-Pixels",
        "__class": "Project"
    },
    {
        "id": "d10450",
        "workgroup_id": "vis",
        "drupal_id": 10450,
        "drupal_path": "/research/projects/ENVIRON-HYDRO",
        "name": "Environmental Hydropower",
        "name_de": "Environmental Hydropower",
        "short_title": "ENVIRON-HYDRO",
        "website": null,
        "status": "active",
        "short_abstract": "<p><strong>ENVIRON-HYDRO: Environmental Hydropower</strong></p><p>The <em>ENVIRON-HYDRO</em> project addresses the growing challenges faced by hydropower plants due to climate change and evolving regulatory frameworks. Austria generates around 70% of its electricity from hydropower, making it one of the most hydropower-dependent countries in Europe. However, many existing plants are aging and are increasingly vulnerable to hydrological changes, extreme weather events, and environmental pressures. ENVIRON-HYDRO aims to develop a hybrid data-driven model that integrates internal operational data from hydropower plants with external environmental parameters, enabling more accurate diagnostics, performance assessments, and long-term operational forecasts. This will lay the foundation for resilient, climate-adaptive, and ecologically compatible hydropower systems.</p><p>At its core, the project focuses on integrating high-frequency sensor data (e.g., vibration, pressure, temperature) with external, typically low-frequency environmental data (e.g., weather patterns, sediment transport, snowmelt). This integrated approach enables the discovery of relationships between plant performance, environmental behavior, and climatic trends - connections that have remained largely unexplored in current practice. <em>ENVIRON-HYDRO</em> thus goes beyond previous research efforts that rely on isolated data streams. At the same time, it tackles key challenges in data fusion, such as differences in resolution, data format, and semantic context.</p><p>The project applies methods from cross-domain data fusion and probabilistic modeling (e.g., Bayesian networks) to combine these heterogeneous data sources into a meaningful framework. The result is a virtual, adaptive representation of hydropower operation under real-world environmental conditions. These models will be used to identify the impacts of seasonal and long-term climate changes and provide actionable recommendations for future operations.</p>",
        "abstract": "<p><strong>ENVIRON-HYDRO: Environmental Hydropower</strong></p><p>The <em>ENVIRON-HYDRO</em> project addresses the growing challenges faced by hydropower plants due to climate change and evolving regulatory frameworks. Austria generates around 70% of its electricity from hydropower, making it one of the most hydropower-dependent countries in Europe. However, many existing plants are aging and are increasingly vulnerable to hydrological changes, extreme weather events, and environmental pressures. ENVIRON-HYDRO aims to develop a hybrid data-driven model that integrates internal operational data from hydropower plants with external environmental parameters, enabling more accurate diagnostics, performance assessments, and long-term operational forecasts. This will lay the foundation for resilient, climate-adaptive, and ecologically compatible hydropower systems.</p><p>At its core, the project focuses on integrating high-frequency sensor data (e.g., vibration, pressure, temperature) with external, typically low-frequency environmental data (e.g., weather patterns, sediment transport, snowmelt). This integrated approach enables the discovery of relationships between plant performance, environmental behavior, and climatic trends - connections that have remained largely unexplored in current practice. <em>ENVIRON-HYDRO</em> thus goes beyond previous research efforts that rely on isolated data streams. At the same time, it tackles key challenges in data fusion, such as differences in resolution, data format, and semantic context.</p><p>The project applies methods from cross-domain data fusion and probabilistic modeling (e.g., Bayesian networks) to combine these heterogeneous data sources into a meaningful framework. The result is a virtual, adaptive representation of hydropower operation under real-world environmental conditions. These models will be used to identify the impacts of seasonal and long-term climate changes and provide actionable recommendations for future operations.</p>",
        "start_date": "2026-02-01",
        "end_date": "2029-01-31",
        "leader_id": 950,
        "logo": {
            "name": "logo.jpg",
            "path": "project:d10450",
            "type": "image/jpeg",
            "size": 32432,
            "orig_name": "logo.jpg",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/d10450/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [],
        "research_areas": [
            "InfoVis"
        ],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/ENVIRON-HYDRO",
        "__class": "Project"
    },
    {
        "id": "d10162",
        "workgroup_id": "vis",
        "drupal_id": 10162,
        "drupal_path": "/research/projects/VRVis-GF-2025-2028",
        "name": "Visual Computing-Grundlagenforschung",
        "name_de": "Visual Computing-Grundlagenforschung",
        "short_title": "VRVis-GF-2025-2028",
        "website": null,
        "status": "active",
        "short_abstract": null,
        "abstract": "<p>Die TU-Wien bearbeitet für VRVis relevante Forschungsfragen im Bereich Visual Computing und stellt die Ergebnisse dem VRVis zur weiteren Verwendung uneingeschränkt zur Verfügung. Die TU-Wien wird die Arbeiten mit branchenüblicher Sorgfalt erbringen. Aufgrund des Forschungscharakters der Arbeiten ist eine Gewährleistung für das Erreichen der angestrebten Forschungsziele oder für eine bestimmte Beschaffenheit der Resultate, insbesondere für deren wirtschaftliche oder rechtliche Verwertbarkeit, ausgeschlossen.</p>",
        "start_date": "2025-01-01",
        "end_date": "2028-12-31",
        "leader_id": 166,
        "logo": null,
        "funding_organisations": [],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/VRVis-GF-2025-2028",
        "__class": "Project"
    },
    {
        "id": "d10489",
        "workgroup_id": "vis",
        "drupal_id": 10489,
        "drupal_path": "/research/projects/H4DES",
        "name": "Haptic 4D Printing for Enhanced Sensemaking",
        "name_de": "Haptic 4D Printing for Enhanced Sensemaking",
        "short_title": "H4DES",
        "website": null,
        "status": "active",
        "short_abstract": null,
        "abstract": null,
        "start_date": "2026-01-01",
        "end_date": "2028-12-31",
        "leader_id": 1410,
        "logo": null,
        "funding_organisations": [],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/H4DES",
        "__class": "Project"
    },
    {
        "id": "d9951",
        "workgroup_id": "vis",
        "drupal_id": 9951,
        "drupal_path": "/research/projects/TARGET",
        "name": "Health virtual twins for the personalised management of stroke related to atrial fibrillation",
        "name_de": "Health virtual twins for the personalised management of stroke related to atrial fibrillation",
        "short_title": "TARGET",
        "website": null,
        "status": "active",
        "short_abstract": null,
        "abstract": "<p>Atrial fibrillation (AF) is the most common heart arrhythmia worldwide, leading to life-limiting complications, high financial burden and significant resource utilisation. In Europe, stroke as a debilitating complication of AF, is amongst the commonest causes of death and the leading cause of disability. AF patients have a 5-fold increased risk for ischaemic stroke. Functional recovery from AF-related stroke (AFRS) is often unsatisfactory, leading to severe disability, reduced quality of life and high mortality. TARGET’s ambition is to develop novel personalised, integrated, multi-scale computational models (virtual twins) and decision- support tools for the AF-related stroke pathway, starting from the healthy state, pathophysiology and disease onset, progression, treatment and recovery. TARGET aims to help prevent AF and AFRS, optimise acute management and rehabilitation, reduce long- term disability, provide a better quality of life for patients and caregivers, and lower healthcare costs. We will ensure patients are at the heart of the project, and the association with experienced commercial partners will ensure the swift adoption of TARGET’s novel technologies. New observational data will be collected via 4 carefully designed prospective clinical studies, which will be used to test and validate the personalised tools and the virtual twin models using a clinical trial simulation (virtual/in-silico), to demonstrate evidence of clinically meaningful results. TARGET will also help consolidate existing mechanistic virtual twin models of the heart, the brain and the neuromusculoskeletal system, enriching these twins to deliver more complex tasks, and supporting research to move towards a more integrated human virtual twin. TARGET represents a milestone project to improve the care and rehabilitation of patients with AF and AFRS, introducing a paradigm shift in risk prediction, diagnosis and management of the disease, and accelerating translational research into practice.</p>",
        "start_date": "2024-01-01",
        "end_date": "2028-12-31",
        "leader_id": 1410,
        "logo": null,
        "funding_organisations": [],
        "research_areas": [
            "MedVis"
        ],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/TARGET",
        "__class": "Project"
    },
    {
        "id": "d4314",
        "workgroup_id": "rend",
        "drupal_id": 4314,
        "drupal_path": "/research/projects/ACD",
        "name": "Advanced Computational Design",
        "name_de": "Advanced Computational Design",
        "short_title": "ACD",
        "website": "http://acd.tuwien.ac.at",
        "status": "active",
        "short_abstract": null,
        "abstract": "<p><em style=\"background-color:transparent;color:rgb( 0 , 0 , 0 )\">Research question:</em><span style=\"background-color:transparent;color:rgb( 0 , 0 , 0 )\"> The main research question addressed by the SFB </span><strong style=\"background-color:transparent;color:rgb( 0 , 0 , 0 )\">“</strong><em style=\"background-color:transparent;color:rgb( 0 , 0 , 0 )\">Advanced Computational Design</em><strong style=\"background-color:transparent;color:rgb( 0 , 0 , 0 )\">”</strong><span style=\"background-color:transparent;color:rgb( 0 , 0 , 0 )\"> is how to advance design tools and processes through multi- and interdisciplinary basic research in the areas of digital architecture, integrated building design, computer graphics and virtual reality, discrete and applied geometry, and computational mechanics.&nbsp;</span></p>\n<p><em style=\"background-color:transparent;color:rgb( 0 , 0 , 0 )\">Wider research context:</em><span style=\"background-color:transparent;color:rgb( 0 , 0 , 0 )\"> Architecture, Engineering and Construction (AEC) shapes our built environment, exerting substantial environmental, cultural and economic influence on society. However, it is among the least digitized industries, still caught in silo-thinking and sequential planning processes. The Information and Communication Technology field, on the other hand, is highly innovative, creating digital design tools that are well-founded in basic research, but often lack relevant domain knowledge, thus hardly meeting designers’ needs.</span></p>\n<p><em style=\"background-color:transparent;color:rgb( 0 , 0 , 0 )\">Innovation:</em><span style=\"background-color:transparent;color:rgb( 0 , 0 , 0 )\"> We will connect architecture, computer science, mathematics and engineering in order to develop advanced computational design tools able to incorporate implicit and explicit design knowledge. This unique combination of scientific disciplines and collaborative research with strong theoretical foundations aims to bring radical innovation in computational design by allowing immediate feedback already in early design phases and by expanding solution spaces by computing design variants that cannot be found by current methods.</span></p>\n<p><em style=\"background-color:transparent;color:rgb( 0 , 0 , 0 )\">Approach:</em><span style=\"background-color:transparent;color:rgb( 0 , 0 , 0 )\"> The proposed research is structured in three areas: Design Methodology (A1), Visual and Haptic Design Interaction (A2) and Form Finding (A3). A1 uses ontologies to describe AEC design semantics, coupled to a novel digital mixed-reality sketchbook, and an innovative implicit modeling approach based on retrieving and enhancing 2D images and 3D point clouds through transfer learning. A1 also acts as a platform for integrating and evaluating the computational tools and methods developed in A2 and A3. These areas will investigate research questions in computational design involving algorithmic solutions. A2 investigates real-time global-illumination and optimization algorithms for lighting design, as well as a new method for large-scale haptic interactions in virtual reality based on a mobile robotic platform. In A3, form finding will be explored regarding geometric, mechanical and material constraints, in particular: paneling of complex shapes by patches of certain surface classes while optimizing the number of molds; algorithms for finding new transformable quad-surfaces; mechanical models for an efficient simulation of bio-composite material systems. Furthermore, new ways of form-finding will be explored experimentally, which will allow for validating the developed algorithmic approaches and reconsidering model assumptions and constraints.</span></p>\n<p><em style=\"background-color:transparent;color:rgb( 0 , 0 , 0 )\">Added value:</em><span style=\"background-color:transparent;color:rgb( 0 , 0 , 0 )\"> The fundamental computational tools and methods to be developed will be applicable in AEC and other fields of the creative industries. </span></p>",
        "start_date": "2020-03-01",
        "end_date": "2028-02-29",
        "leader_id": 193,
        "logo": null,
        "funding_organisations": [],
        "research_areas": [],
        "publications": [
            "Derflinger-2026-RTTW2",
            "ecormier-nocca-2025-sls",
            "Erhardt_2025_Scutes",
            "Erhardt_Seirlehner-2024-TamashiiIntegration",
            "freude-2023-prh",
            "freude-2023-sem",
            "freude-2025-iso",
            "Fuericht-2026-FPD",
            "Grisenti-2026-HSHPG",
            "huber-2025-esl",
            "koenigsberger-2024-msu",
            "Koeppl-2023-DLO",
            "kovacs-2023-ttm",
            "landauer-2022-kido",
            "LIPP-2020-PHD",
            "lipp-2024-val",
            "Meier_2024_WIALT",
            "mortezapoor-2022-photogrammabot",
            "Mortezapoor2021ssc",
            "Preymann-2022-pytamashii",
            "rasoulzadeh-2022-strokes2surface",
            "rasoulzadeh-2024-strokes2surface",
            "reisinger-2023-iad",
            "Schwengerer_Mathias-2022-BGL",
            "steinkellner-2022-dll",
            "ulschmid-2024-reo",
            "ulschmid-2025-apc",
            "unterguggenberger-2023-vaw",
            "wimmer-2022-acd",
            "wimmer-2023-acd",
            "Zezulka_Matthias-2023-MatLabOptInterface",
            "Zezulka-2025-NRL",
            "Zezulka-2026-FGTS",
            "Zezulka-2026-GridThermalSim",
            "zhang-thesis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/ACD",
        "__class": "Project"
    },
    {
        "id": "d10164",
        "workgroup_id": "vis",
        "drupal_id": 10164,
        "drupal_path": "/research/projects/ClimaSens",
        "name": "Climate-sensitive Adaptive Planning for Shaping Resilient Cities",
        "name_de": "Climate-sensitive Adaptive Planning for Shaping Resilient Cities",
        "short_title": "ClimaSens",
        "website": null,
        "status": "active",
        "short_abstract": null,
        "abstract": "<p>This project aims to realize an interactive scenario-based simulation framework with integrated microclimate simulation, data analysis, and visualization for decision support in urban planning. We want to enable workflows for planning mitigation measures interactively during a live session. This requires significant advancements in the execution of and interaction with simulation scenarios and insightful visualization of the resulting data, which we will research in this project.</p> \n<p>One goal of this project is designing a highly efficient urban microclimate model system and its integration into a dynamic data flow system enabling interactive simulation steering. We seek to adapt PALM-4U, the state-of-the-art climate model system PALM 6.0 for applications in urban areas, to multi-GPU systems in order to combine its high reliability and functionality with the high computational performance needed for interactive workflows. For even higher performance in tasks that do not require the highest accuracy, such as parameter space exploration, we aim at a surrogate model based on a neural network to provide near-instant feedback.</p>",
        "start_date": "2024-01-01",
        "end_date": "2027-12-31",
        "leader_id": 166,
        "logo": null,
        "funding_organisations": [],
        "research_areas": [],
        "publications": [
            "huber-2025-eed"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/ClimaSens",
        "__class": "Project"
    },
    {
        "id": "d9522",
        "workgroup_id": "vis",
        "drupal_id": 9522,
        "drupal_path": "/research/projects/VaCoViCu2",
        "name": "Visual Analytics and Computer Vision meet Cultural Heritage",
        "name_de": "Visuelle Analytik und Computer Vision treffen auf kulturelles Erbe",
        "short_title": "VaCoViCu2",
        "website": "http://www.visual-heritage.at",
        "status": "active",
        "short_abstract": null,
        "abstract": "<p><strong>Wider research context/theoretical framework</strong></p>\n<p>In the last decades, archives and museums made extensive efforts to foster the digital preservation of cultural heritage (CH) artifacts such as historical photographs and amateur films. These collections of digitized artifacts offer immense potential to increase the knowledge of our heritage by investigating patterns and relationships. The systematic analysis and presentation of these large amounts of visual media data are, however, still strongly limited due to the lack of metadata and suitable analysis approaches. This impedes historians and film archivists but also lay users in analyzing, interpreting, and thereby preserving the memories of human cultural history.</p>\n<p><strong>Hypotheses/research questions/objectives</strong></p>\n<p>Our cooperative doctoral programme aims to close basic and applied research gaps by combining computer vision (CV) and visual analytics (VA). We jointly work on the advancement of interactive analysis, exploration, and presentation of historical visual media collections. The central hypothesis behind this is that combining CV and VA is necessary to unlock large cultural heritage collections and open them to a wide range of users.&nbsp;</p>\n<p><strong>Approach/methods</strong></p>\n<p>We follow a problem-driven research paradigm determined by the cultural heritage domain. We leverage human-in-the-loop machine learning (ML) and integrate CV and VA methods for the analysis of cultural objects and their relationships across space and time. This includes ML, information retrieval and CV for analysis as well as situated visualization and storytelling for exploration and presentation. We quantitatively evaluate the results against ground truth annotations and through user studies.</p>\n<p><strong>Faculty</strong></p>\n<p>The project is a collaboration of researchers from St. Pölten University of Applied Sciences (FHSTP) and TU Wien. Seven faculty members contribute complementary research expertise on CV, VA, and CH across basic and applied research. The project shall elevate the existing scholarly ties between FHSTP and TU Wien&nbsp;to a new level of inter-institutional cooperation.&nbsp;</p>\n<p><strong>Doctoral programme</strong></p>\n<p>The doctoral programme foresees 5 thematically related PhDs. The topics focus on the interactive analysis of visual media in cultural heritage collections and complement each other in terms of methods, use cases, and addressed users. Each topic is supervised by a team of 2-3 researchers (at least one from each institution), and a comprehensive education and training programme tailored to research is provided.</p>\n<p><strong>Added value</strong></p>\n<p>Added value is generated at multiple levels: 1) embedding doctoral students in a team of high profile visual computing researchers; 2) increasing junior faculty members’ experience in supervision skills; 3) leveraging the complementary strengths of applied and basic research; 4) sharing of resources between institutions; 5) opening up large cultural heritage collections to a wide range of users by combining CV and VA.</p>",
        "start_date": "2023-10-01",
        "end_date": "2027-09-30",
        "leader_id": 1110,
        "logo": {
            "name": "logo.png",
            "path": "project:d9522",
            "type": "image/png",
            "size": 17914,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/d9522/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [],
        "research_areas": [
            "InfoVis"
        ],
        "publications": [
            "aigner-2025-vhv",
            "tekaya-2025-amo"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/VaCoViCu2",
        "__class": "Project"
    },
    {
        "id": "d10070",
        "workgroup_id": "rend",
        "drupal_id": 10070,
        "drupal_path": "/research/projects/GreenFDT",
        "name": "Green Facade Digital Twin",
        "name_de": "Green Facade Digital Twin",
        "short_title": "GreenFDT",
        "website": null,
        "status": "active",
        "short_abstract": null,
        "abstract": null,
        "start_date": "2025-01-13",
        "end_date": "2027-07-12",
        "leader_id": 193,
        "logo": null,
        "funding_organisations": [],
        "research_areas": [],
        "publications": [
            "stauss-2025-gfd"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/GreenFDT",
        "__class": "Project"
    },
    {
        "id": "d10169",
        "workgroup_id": "rend",
        "drupal_id": 10169,
        "drupal_path": "/research/projects/Room-Imagineer",
        "name": "Erfassungs- und Modellierungstool zur akustischen, visuellen und räumlichen Gestaltung hybrider Meetingräume",
        "name_de": "Erfassungs- und Modellierungstool zur akustischen, visuellen und räumlichen Gestaltung hybrider Meetingräume",
        "short_title": "Room Imagineer",
        "website": null,
        "status": "active",
        "short_abstract": "<p>The goal of this project is to research automated design for meeting and conference rooms. The main items are desks, chairs, audio and video equipment. Based on a 3D model of a room provided at input, at TU Wien we investigate concepts for:</p><ul><li>seating arrangement</li><li>screen placement</li><li>camera positioning</li><li>selection of hardware properties</li></ul><p>The choices are interconnected, influencing each other. Our plan is to filter the available space for placement options and propose a metric representing the setup regarding the visibility of meeting participants and expected picture quality. The best design will be presented at the output.</p><p>We will approach this problem as a multi-variate optimization in discrete space. Collisions and visibility will rule out many combinations. Further ergonomic constraints will narrow down the space of feasible solutions. The candidate setups will be judged by the metric to select the most suitable for the given shape and parameters of the input room.</p>",
        "abstract": "<p>The goal of this project is to research automated design for meeting and conference rooms. The main items are desks, chairs, audio and video equipment. Based on a 3D model of a room provided at input, at TU Wien we investigate concepts for:</p><ul><li>seating arrangement</li><li>screen placement</li><li>camera positioning</li><li>selection of hardware properties</li></ul><p>The choices are interconnected, influencing each other. Our plan is to filter the available space for placement options and propose a metric representing the setup regarding the visibility of meeting participants and expected picture quality. The best design will be presented at the output.</p><p>We will approach this problem as a multi-variate optimization in discrete space. Collisions and visibility will rule out many combinations. Further ergonomic constraints will narrow down the space of feasible solutions. The candidate setups will be judged by the metric to select the most suitable for the given shape and parameters of the input room.</p>",
        "start_date": "2024-06-01",
        "end_date": "2027-05-31",
        "leader_id": 193,
        "logo": null,
        "funding_organisations": [],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/Room-Imagineer",
        "__class": "Project"
    },
    {
        "id": "d9259",
        "workgroup_id": "rend",
        "drupal_id": 9259,
        "drupal_path": "/research/projects/PathGuideWWTF",
        "name": "Toward Optimal Path Guiding for Photorealistic Rendering",
        "name_de": "Toward Optimal Path Guiding for Photorealistic Rendering",
        "short_title": "PathGuideWWTF",
        "website": "https://www.cg.tuwien.ac.at/research/projects/PathGuideWWTF",
        "status": "active",
        "short_abstract": null,
        "abstract": "<p>Photorealistic rendering, which is essential for many applications, involves evaluating complicated integrals. These integrals are predominantly approximated with a Monte Carlo method called path tracing. Unfortunately, this method generally requires many samples and therefore long computation times for noiseless images. Recently, so-called path-guiding techniques have been introduced, which gather knowledge about the integrands to facilitate sampling. In this project, we seek to identify and exploit untapped potential in path guiding to provide significant improvements in this active line of research. The two principal problems in path guiding are i) representing gathered knowledge and ii) sampling using that representation. We strive toward the ideal of optimal path guiding (considering practical constraints), which leads us to our main research questions: Q1. How can we represent path-guiding knowledge optimally? Q2. How can we sample optimally using that representation? We pursue two novel key ideas for possible answers: For Q1, we cast the problem of representing knowledge as a mathematical optimization problem, enabling us to exploit the knowledge and methods from the field of optimization. For Q2, we introduce “scalable product importance sampling”: adapting sampling accuracy as required to economize on resources. We see great potential for synergies between the two ideas and for them to open up new lines of investigation, thereby inspiring many follow-up works.</p>",
        "start_date": "2023-02-01",
        "end_date": "2027-01-31",
        "leader_id": 193,
        "logo": {
            "name": "logo.png",
            "path": "project:d9259",
            "type": "image/png",
            "size": 3185530,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/d9259/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [],
        "research_areas": [
            "Rendering"
        ],
        "publications": [
            "eickmeyer-2027-tcd",
            "freude-2023-prh",
            "freude-2025-iso",
            "hasbay-2026-tfo",
            "sakai-2024-asa",
            "sakai-2025-stater"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/PathGuideWWTF",
        "__class": "Project"
    },
    {
        "id": "d9275",
        "workgroup_id": "vis",
        "drupal_id": 9275,
        "drupal_path": "/research/projects/IVILPC",
        "name": "Instant Visualization and Interaction for Large Point Clouds",
        "name_de": null,
        "short_title": "IVILPC",
        "website": null,
        "status": "active",
        "short_abstract": "<p class=\"text-align-justify\">Point clouds are a quintessential 3D geometry representation format, and often the first model obtained from reconstructive efforts, such as LIDAR scans. IVILPC aims for fast, authentic, interactive, and high-quality processing of such point-based data sets. Our project explores high-performance software rendering routines for various point-based primitives, such as point sprites, gaussian splats, surfels, and particle systems. Beyond conventional use cases, point cloud rendering also forms a key component of point-based machine learning methods and novel-view synthesis, where performance is paramount. We will exploit the flexibility and processing power of cutting-edge GPU architecture features to formulate novel, high-performance rendering approaches. The envisioned solutions will be applicable to unstructured point clouds for instant rendering of billions of points. Our research targets minimally-invasive compression, culling methods, and level-of-detail techniques for point-based rendering to deliver high performance and quality on-demand. We explore GPU-accelerated editing of point clouds, as well as common display issues on next-generation display devices. IVILPC lays the foundation for interaction with large point clouds in conventional and immersive environments. Its goal is an efficient data knowledge transfer from sensor to user, with a wide range of use cases to image-based rendering, virtual reality (VR) technology, architecture, the geospatial industry, and cultural heritage.</p>\r\n",
        "abstract": "<p class=\"text-align-justify\">Point clouds are a quintessential 3D geometry representation format, and often the first model obtained from reconstructive efforts, such as LIDAR scans. IVILPC aims for fast, authentic, interactive, and high-quality processing of such point-based data sets. Our project explores high-performance software rendering routines for various point-based primitives, such as point sprites, gaussian splats, surfels, and particle systems. Beyond conventional use cases, point cloud rendering also forms a key component of point-based machine learning methods and novel-view synthesis, where performance is paramount. We will exploit the flexibility and processing power of cutting-edge GPU architecture features to formulate novel, high-performance rendering approaches. The envisioned solutions will be applicable to unstructured point clouds for instant rendering of billions of points. Our research targets minimally-invasive compression, culling methods, and level-of-detail techniques for point-based rendering to deliver high performance and quality on-demand. We explore GPU-accelerated editing of point clouds, as well as common display issues on next-generation display devices. IVILPC lays the foundation for interaction with large point clouds in conventional and immersive environments. Its goal is an efficient data knowledge transfer from sensor to user, with a wide range of use cases to image-based rendering, virtual reality (VR) technology, architecture, the geospatial industry, and cultural heritage.</p>\r\n",
        "start_date": "2023-07-02",
        "end_date": "2026-07-01",
        "leader_id": 166,
        "logo": {
            "name": "logo.png",
            "path": "project:d9275",
            "type": "image/png",
            "size": 268858,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/d9275/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [],
        "research_areas": [
            "Geometry",
            "Rendering",
            "VR"
        ],
        "publications": [
            "erler-2025-lidarscout",
            "goel-2024-rdr",
            "kerbl-2024-ah3",
            "papantonakis-2024-rmf",
            "SCHUETZ-2024-SIMLOD",
            "schuetz-2025-splatshop",
            "ulschmid-2024-reo",
            "ulschmid-2025-apc",
            "unterguggenberger-2024-fropo",
            "voglreiter-2023-tro"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/IVILPC",
        "__class": "Project"
    },
    {
        "id": "d10214",
        "workgroup_id": "vis",
        "drupal_id": 10214,
        "drupal_path": "/research/projects/Alpenite",
        "name": "High-performance triangle-mesh streaming for unbounded datasets.",
        "name_de": "High-performance triangle-mesh streaming for unbounded datasets.",
        "short_title": "Alpenite",
        "website": null,
        "status": "active",
        "short_abstract": null,
        "abstract": null,
        "start_date": "2025-01-01",
        "end_date": "2026-06-30",
        "leader_id": 166,
        "logo": null,
        "funding_organisations": [],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/Alpenite",
        "__class": "Project"
    },
    {
        "id": "d10206",
        "workgroup_id": "vis",
        "drupal_id": 10206,
        "drupal_path": "/research/projects/EINSATZBILD",
        "name": "Overview assessment of current and planned ambulance services",
        "name_de": "Gesamtbeurteilung von aktuellen und geplanten Einsätzen",
        "short_title": "EINSATZBILD+",
        "website": null,
        "status": "active",
        "short_abstract": "<p><strong>EINSATZBILD+: Overview assessment of current and planned ambulance services</strong></p><p>The aim of the project <em>EINSATZBILD+</em> is to support dispatchers working for the Vienna Red Cross in their daily work. One of their main responsibility is to dispatch available cars to booked positions to pick up patients. We will develop a new user interface employing geospatial visualization to support this task. In addition, an evaluation of the new virtual dispatch board in real-world operation is planned in order to gain new insights into the use of user interfaces in emergency dispatch centres.</p>",
        "abstract": "<p><strong>EINSATZBILD+: Overview assessment of current and planned ambulance services</strong></p><p>The aim of the project <em>EINSATZBILD+</em> is to support dispatchers working for the Vienna Red Cross in their daily work. One of their main responsibility is to dispatch available cars to booked positions to pick up patients. We will develop a new user interface employing geospatial visualization to support this task. In addition, an evaluation of the new virtual dispatch board in real-world operation is planned in order to gain new insights into the use of user interfaces in emergency dispatch centres.</p>",
        "start_date": "2025-05-01",
        "end_date": "2026-04-30",
        "leader_id": 950,
        "logo": {
            "name": "logo.png",
            "path": "project:d10206",
            "type": "image/png",
            "size": 3037423,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/d10206/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [],
        "research_areas": [
            "InfoVis"
        ],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/EINSATZBILD",
        "__class": "Project"
    },
    {
        "id": "d10413",
        "workgroup_id": "vis",
        "drupal_id": 10413,
        "drupal_path": "/research/projects/RisikoBlick",
        "name": "Proactive planning in water supply",
        "name_de": "Vorrauschauende Planung in der Wasserversorgung",
        "short_title": "RisikoBlick",
        "website": null,
        "status": "active",
        "short_abstract": null,
        "abstract": "<p>The aim of the RisikoBlick project is to develop a new user interface that enables both the current situation and forward-looking planning of water supply in specific regions. Work on the RisikoBlick application is based on developments from the EINSATZBILD+ project.</p>",
        "start_date": "2026-02-01",
        "end_date": "2026-04-30",
        "leader_id": 950,
        "logo": {
            "name": "logo.jpg",
            "path": "project:d10413",
            "type": "image/jpeg",
            "size": 28325,
            "orig_name": "logo.jpg",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/d10413/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [],
        "research_areas": [
            "InfoVis"
        ],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/RisikoBlick",
        "__class": "Project"
    },
    {
        "id": "d9282",
        "workgroup_id": "vis",
        "drupal_id": 9282,
        "drupal_path": "/research/projects/JDE",
        "name": "Joint Human-Machine Data Exploration",
        "name_de": "Joint Human-Machine Data Exploration",
        "short_title": "JDE",
        "website": "https://www.cg.tuwien.ac.at/research/projects/JDE",
        "status": "active",
        "short_abstract": "<h4>Wider research context</h4><p>In many domains, such as biology, chemistry, medicine, and the humanities, <strong>large amounts of data</strong> exist. <strong>Visual exploratory analysis</strong> of these data is often not practicable due to their <strong>size </strong>and their <strong>unstructured</strong> nature. Traditional machine learning (ML) requires large-scale labeled training data and a clear target definition, which is typically not available when exploring unknown data. For such <strong>large-scale, unstructured, open-ended, and domain-specific problems</strong>, we need an interactive approach <strong>combining the strengths of ML and human analytical skills</strong> into a unified process that helps users to \"detect the expected and discover the unexpected\".&nbsp;</p><h4>Hypotheses</h4><p>We hypothesize that <strong>humans and machines can learn jointly from the data and from each other during exploratory data analysis</strong>. We further hypothesize that this joint learning enables a new visual analytics approach that reveals how users' incrementally growing insights fit the data, which will <strong>foster questioning and reframing</strong>.&nbsp;</p><h4>Approach</h4><p>We <strong>integrate interactive ML and interactive visualization</strong> to learn about data and from data in a joint fashion. To this end, we propose a <strong>data-agnostic joint human-machine data exploration (JDE) framework</strong> that supports users in the exploratory analysis and the discovery of meaningful structures in the data. 
In contrast to existing approaches, we investigate data exploration from a new perspective that focuses on the <strong>discovery and definition of complex structural information from the data</strong> rather than primarily on the model (as in ML) or on the data itself (as in visualization).</p><h4>Innovation</h4><p>First, the conceptual framework of JDE introduces a novel <strong>knowledge modeling</strong> approach for visual analytics based on interactive ML that incrementally captures potentially complex, yet interpretable concepts that users expect or have learned from the data. Second, it proposes an <strong>intelligent agent</strong> that elicits information fitting the users' expectations and discovers what may be unexpected for the users. Third, it relies on a new <strong>visualization </strong>approach focusing on how the large-scale data fits the users' knowledge and expectations, rather than solely the data. Fourth, this leads to <strong>novel exploratory data analysis techniques</strong> -- an interactive interplay between knowledge externalization, machine-guided data inspection, questioning, and reframing.</p><h4>Primary researchers involved</h4><p>The project is a joint collaboration between researchers from <strong>TU Wien</strong> (<a href=\"https://www.cg.tuwien.ac.at/users/waldner\">Manuela Waldner</a>) and the <strong>University of Applied Sciences St. Pölten</strong> (<a href=\"https://www.fhstp.ac.at/de/uber-uns/mitarbeiter-innen-a-z/zeppelzauer-matthias\">Matthias Zeppelzauer</a>), Austria, who contribute and join their complementary expertise on information visualization, visual analytics, and interactive ML.</p><p>&nbsp;</p><p>FWF Stand-alone project&nbsp;P 36453</p><p>DOI: <a href=\"https://doi.org/10.55776/P36453\">10.55776/P36453</a></p>",
        "abstract": "<h4>Wider research context</h4><p>In many domains, such as biology, chemistry, medicine, and the humanities, <strong>large amounts of data</strong> exist. <strong>Visual exploratory analysis</strong> of these data is often not practicable due to their <strong>size </strong>and their <strong>unstructured</strong> nature. Traditional machine learning (ML) requires large-scale labeled training data and a clear target definition, which is typically not available when exploring unknown data. For such <strong>large-scale, unstructured, open-ended, and domain-specific problems</strong>, we need an interactive approach <strong>combining the strengths of ML and human analytical skills</strong> into a unified process that helps users to \"detect the expected and discover the unexpected\".&nbsp;</p><h4>Hypotheses</h4><p>We hypothesize that <strong>humans and machines can learn jointly from the data and from each other during exploratory data analysis</strong>. We further hypothesize that this joint learning enables a new visual analytics approach that reveals how users' incrementally growing insights fit the data, which will <strong>foster questioning and reframing</strong>.&nbsp;</p><h4>Approach</h4><p>We <strong>integrate interactive ML and interactive visualization</strong> to learn about data and from data in a joint fashion. To this end, we propose a <strong>data-agnostic joint human-machine data exploration (JDE) framework</strong> that supports users in the exploratory analysis and the discovery of meaningful structures in the data. 
In contrast to existing approaches, we investigate data exploration from a new perspective that focuses on the <strong>discovery and definition of complex structural information from the data</strong> rather than primarily on the model (as in ML) or on the data itself (as in visualization).</p><h4>Innovation</h4><p>First, the conceptual framework of JDE introduces a novel <strong>knowledge modeling</strong> approach for visual analytics based on interactive ML that incrementally captures potentially complex, yet interpretable concepts that users expect or have learned from the data. Second, it proposes an <strong>intelligent agent</strong> that elicits information fitting the users' expectations and discovers what may be unexpected for the users. Third, it relies on a new <strong>visualization </strong>approach focusing on how the large-scale data fits the users' knowledge and expectations, rather than solely the data. Fourth, this leads to <strong>novel exploratory data analysis techniques</strong> -- an interactive interplay between knowledge externalization, machine-guided data inspection, questioning, and reframing.</p><h4>Primary researchers involved</h4><p>The project is a joint collaboration between researchers from <strong>TU Wien</strong> (<a href=\"https://www.cg.tuwien.ac.at/users/waldner\">Manuela Waldner</a>) and the <strong>University of Applied Sciences St. Pölten</strong> (<a href=\"https://www.fhstp.ac.at/de/uber-uns/mitarbeiter-innen-a-z/zeppelzauer-matthias\">Matthias Zeppelzauer</a>), Austria, who contribute and join their complementary expertise on information visualization, visual analytics, and interactive ML.</p><p>&nbsp;</p><p>FWF Stand-alone project&nbsp;P 36453</p><p>DOI: <a href=\"https://doi.org/10.55776/P36453\">10.55776/P36453</a></p>",
        "start_date": "2023-05-01",
        "end_date": "2026-04-30",
        "leader_id": 1110,
        "logo": {
            "name": "logo.png",
            "path": "project:d9282",
            "type": "image/png",
            "size": 42258,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/d9282/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [],
        "research_areas": [
            "InfoVis"
        ],
        "publications": [
            "eitler-2024-sos",
            "eschner-2025-ide",
            "indirectBiasLanguageModels-2023",
            "irendorfer-2024-uat",
            "matt-2024-cvil",
            "matt-2025-scv",
            "stoff-2025-pvu",
            "webGPU_aggregateVis-2023",
            "wolf-2024-jhd"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/JDE",
        "__class": "Project"
    },
    {
        "id": "WorldScale",
        "workgroup_id": "rend",
        "drupal_id": 4158,
        "drupal_path": "/research/projects/WorldScale",
        "name": "Modeling the World at Scale",
        "name_de": "Modellierung der Welt nach Maß",
        "short_title": "WorldScale",
        "website": null,
        "status": "active",
        "short_abstract": "Vision: reconstruct a model of the world that permits online level-of-detail extraction.",
        "abstract": "<p>Vision: reconstruct a model of the world that permits online level-of-detail extraction. The core idea in this project is to interactively integrate sensed 3D data of varying sources and scales into a topologically clean surface. Our varying-scale model then permits online extraction of seamless levels of detail for rendering with minimal aliasing and popping artifacts. For this, we develop new topological guarantees to minimize the needed geometry. By exploiting the inherent redundancy of 2D surfaces in 3D, we design a fast way to robustly detect changes that let users better control the scan acquisition process. The topologically clean output surface and the change detection permit easy processing of the geometry for common use cases such as autonomous navigation, environment learning, augmented reality displays of georeferenced semantic information. An example application is fusing and distributing scans from the built-in sensors of multiple autonomous vehicles (ground, air), for incidental map updating as well as guaranteed efficient collision detection and tracking changes for path planning.</p>\r\n",
        "start_date": "2020-05-01",
        "end_date": "2026-04-30",
        "leader_id": 948,
        "logo": {
            "name": "logo.jpg",
            "path": "project:WorldScale",
            "type": "image/jpeg",
            "size": 64953,
            "orig_name": "logo.jpg",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/WorldScale/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [
            {
                "id": "WWTF",
                "contract_number": "ICT19-009",
                "comment": "€ 578.450"
            }
        ],
        "research_areas": [
            "Geometry"
        ],
        "publications": [
            "antes-2026-maa",
            "ausserer-2026-baa",
            "balvan-rsf",
            "beleznai_csaba-2025-pr",
            "belic_dejan_pr",
            "bodzenta-2026-baa",
            "csecsy-fcd",
            "darovskikh_leonid-2025-prj",
            "deak-a3",
            "depner_dennis-2020-baa",
            "depner_dennis-2022-maa",
            "elagrod_ahmed-baa",
            "fitzinger_andreas-daa",
            "fraissl_niklas-2023-pra",
            "fuchs_peter-2024-baa",
            "fuericht_2025_prj",
            "fuericht_elias-2025-prj",
            "hamzic_ademir-baa",
            "harisch-mvs",
            "hatzinger-2026-baa",
            "hoffer-toth-com",
            "holom_2025-prj",
            "jira_marcel-2024-baa",
            "kaipel_nikolas-2025-baa",
            "karampekios_anastasios-prj",
            "kazda-tmd",
            "knoetzl_alexander-2025-prj",
            "kohlhas_jakob-2023-pra",
            "konrad-2020-baa",
            "kozonits_marcel-2025-baa",
            "kralicek_sebastian_baa",
            "kubicek-2024-baa",
            "lang_jakob-2022-baa",
            "lazarevic-tci",
            "leutschacher_2025_prj",
            "macho_jonas-2024-baa",
            "marin-2022-sig",
            "marin-2022-sigdt",
            "marin-2023-pic",
            "marin-2024-dsr",
            "marin-2024-pcp",
            "marin-2024-rcf",
            "marin-2024-sing",
            "markovich_aleksandar-2022-maa",
            "mayr_patrick_maa",
            "micu_2025-prj",
            "miklautsch_florian-baa",
            "mueller-2024-rbt",
            "mujadzic_iman_baa",
            "mujadzic-2023-hfp",
            "nawrocki-2024-nsr",
            "negi_ashish-2023-baa",
            "neumann-sfd",
            "ohrhallinger_stefan-2022-it1",
            "ohrhallinger_stefan-2022-tut",
            "ohrhallinger_stefan-2023-con",
            "ohrhallinger_stefan-2024-inv",
            "ohrhallinger-2021-egs",
            "ohrhallinger-2022-e2t",
            "pakkanen-2026-prj",
            "parakkat-2024-ballmerge",
            "pointner_simon-2022-maa",
            "proelss-bdp",
            "prohaska_julia-2023-baa",
            "reiher_joerg-2023-pra",
            "reiher-joerg-2024-daa",
            "rozanics-lsf",
            "rubik_michael-2022-baa",
            "schrammel_sebastian_baa",
            "schulz-faf",
            "seisenbacher-cyclesafely",
            "slajcho_levente-baa",
            "slavtchev-dcd",
            "starnberger-cli",
            "steinheber-2024-oto",
            "steinschauer-2020-baa",
            "trenovatz-lpd",
            "tumpach_barbara-pra",
            "vasiljevic-gpc",
            "wiesinger_2025-baa",
            "wiesinger_klemens-2023-baa",
            "wildt-tci",
            "withalm_brigitte-2022-baa",
            "wolffhardt-2026-prj",
            "yoo-chaeran-2025-pr",
            "zeilinger_matthias-2020-baa"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/WorldScale",
        "__class": "Project"
    },
    {
        "id": "d10027",
        "workgroup_id": "vis",
        "drupal_id": 10027,
        "drupal_path": "/research/projects/VAVis",
        "name": "Visual Analytics und Visualisierung",
        "name_de": "Visual Analytics und Visualisierung",
        "short_title": "VAVis",
        "website": null,
        "status": "active",
        "short_abstract": null,
        "abstract": "<p class=\"ql-align-justify\">Im Rahmen des Projekts werden akademische Arbeiten mit einem Unternehmenspartner des VRVis betreut, andere Projektmanagementtätigkeiten (Projekte: HEROD und REINFORCE) durchgeführt, sowie gemeinsame Publikationstätigkeiten forciert und andere wissenschaftliche Kooperationen angestrebt. Deliverables: Endberichte unter anderem für die genannten Projekte, Papers, Tätigkeitsberichte für akademische Arbeiten (für COMET Jahresberichte), nach Absprache andere Dokumente und Deliverables.</p> \n<p><br></p>",
        "start_date": "2025-01-01",
        "end_date": "2025-12-31",
        "leader_id": 166,
        "logo": null,
        "funding_organisations": [],
        "research_areas": [],
        "publications": [
            "dhanoa-2025-ave"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/VAVis",
        "__class": "Project"
    },
    {
        "id": "d9691",
        "workgroup_id": "rend",
        "drupal_id": 9691,
        "drupal_path": "/research/projects/Potree-Next",
        "name": "Bringing Point Clouds to WebGPU",
        "name_de": "Bringing Point Clouds to WebGPU",
        "short_title": "Potree-Next",
        "website": "potree.org",
        "status": "active",
        "short_abstract": null,
        "abstract": "<p>Potree ist ein seit 2012 entwickelter Open-Source Punktwolkenviewer für Web Browser, der von zahlreichen Instituten und Firmen zur Visualisierung von 3D Scandaten verwendet wird. Mithilfe von Netidee wollen wir eine WebGPU-basierte Neuauflage schaffen die modernen Anforderungen genügt. Das Projekt richtet sich an Personen die massive Punktwolkendaten schnell und einfach über Browser ihren Benutzern darstellen wollen. Darunter fallen insbesondere Archäologen (Scans von Ruinen, Artefakten, Gebäuden), Architekten und Planer (Dokumentation des Ist-Zustandes), Regierungseinrichtungen (Ländermodelle z.B. Niederlande: 640 Milliarden Punkte, USA: 57 Billionen Punkte) und mehr. Wir wollen den erst dieses Jahr veröffentlichten \"WebGPU\" Standard für Browser dazu nutzen Potree - mit häufig angeforderten aber bis dahin kaum realisierbaren Features - frischen Wind zu geben. Insbesondere fallen darunter eine neue Level-of-Detail Datenstruktur mit verbesserter Qualität und Kompressionsrate, Unterstützung vom \"3D-Tiles\" Standard um auch Dreiecksmodelle zu supporten, sowie ein User Interface um beliebige Punktattribute - welche Essentieller Bestandteil sind - zu visualisieren. Wir schreiben Potree von Grund auf neu in WebGPU. Die seit 2023 verfügbaren Compute Shader erlauben es uns viele teure Processing schritte auf die GPU auszulagern, wodurch wir gestreamte 3D Daten - für die allgemeine Komprimierung ineffizient und langsam ist - direkt auf der GPU decoden können. User werden eigene Shader zum Rendern beliebiger Attribute schreiben, oder von defaults wählen können.</p>",
        "start_date": "2024-01-01",
        "end_date": "2025-06-30",
        "leader_id": 1116,
        "logo": null,
        "funding_organisations": [],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/Potree-Next",
        "__class": "Project"
    },
    {
        "id": "d4559",
        "workgroup_id": "rend",
        "drupal_id": 4559,
        "drupal_path": "/research/projects/ECOLOPES",
        "name": "ECOlogical building enveLOPES: a game-changing design approach for regenerative urban ecosystems",
        "name_de": "ECOlogical building enveLOPES: a game-changing design approach for regenerative urban ecosystems",
        "short_title": "ECOLOPES",
        "website": "https://www.ecolopes.org",
        "status": "active",
        "short_abstract": null,
        "abstract": null,
        "start_date": "2021-04-01",
        "end_date": "2025-03-31",
        "leader_id": 193,
        "logo": null,
        "funding_organisations": [],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/ECOLOPES",
        "__class": "Project"
    },
    {
        "id": "d4534",
        "workgroup_id": "vis",
        "drupal_id": 4534,
        "drupal_path": "/research/projects/xCTing",
        "name": "xCTing - Enabling X-ray CT based Industry 4.0 process chains by training Next Generation research experts",
        "name_de": "Aktivierung von auf Röntgen-CT basierenden Prozessketten der Industrie 4.0 durch die Ausbildung von Forschungsexperten der nächsten Generation",
        "short_title": "xCTing",
        "website": null,
        "status": "active",
        "short_abstract": null,
        "abstract": null,
        "start_date": "2021-03-01",
        "end_date": "2025-02-28",
        "leader_id": 166,
        "logo": null,
        "funding_organisations": [],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/xCTing",
        "__class": "Project"
    },
    {
        "id": "d4729",
        "workgroup_id": "rend",
        "drupal_id": 4729,
        "drupal_path": "/research/projects/Unbeständige-Körper",
        "name": "Unstable Bodies",
        "name_de": "Unbeständige Körper",
        "short_title": "Unbeständige Körper",
        "website": null,
        "status": "active",
        "short_abstract": null,
        "abstract": null,
        "start_date": "2021-12-01",
        "end_date": "2024-11-30",
        "leader_id": 193,
        "logo": null,
        "funding_organisations": [],
        "research_areas": [],
        "publications": [
            "freude-2025-ras"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/Unbeständige-Körper",
        "__class": "Project"
    },
    {
        "id": "d9555",
        "workgroup_id": "vis",
        "drupal_id": 9555,
        "drupal_path": "/research/projects/weBIGeo",
        "name": "Massive geographische Datenvisualisierung mit WebGPU",
        "name_de": "Massive geographische Datenvisualisierung mit WebGPU",
        "short_title": "weBIGeo",
        "website": "https://www.cg.tuwien.ac.at/research/projects/weBIGeo",
        "status": "active",
        "short_abstract": "<p>Geographische Daten, wie etwa Bewegungsdaten oder geolokalisierte Messungen über die Zeit, sind oft mehrere Gigabyte groß und können daher nicht mehr mit klassischen online Tools analysiert und präsentiert werden. Wir wollen helfen, Datenwissenschaftler_innen, Datenjournalist_innen und der breiten Masse diese Daten durch interaktive Echtzeitvisualisierung im Web zugänglich machen.</p><p>&nbsp;</p><h2>Links</h2><ul><li><a href=\"https://webigeo.alpinemaps.org/\">weBIGeo</a> (Online Demo, benötigt Chrome auf Desktop!)&nbsp;</li><li><a href=\"https://www.netidee.at/webigeo\">weBIGeo @ NetIdee</a> (Blog und andere Projektressourcen)&nbsp;</li><li><a href=\"https://github.com/weBIGeo\">weBIGeo @ GitHub</a> (Repository mit den Ergebnissen des NetIdee Projektes)&nbsp;</li><li><a href=\"https://github.com/AlpineMapsOrg\">AlpineMaps @ GitHub</a> (Repository mit WebGL-basiertem 3D Renderer und Basisbibliotheken)&nbsp;</li></ul>",
        "abstract": "<p>Geographische Daten, wie etwa Bewegungsdaten oder geolokalisierte Messungen über die Zeit, sind oft mehrere Gigabyte groß und können daher nicht mehr mit klassischen online Tools analysiert und präsentiert werden. Wir wollen helfen, Datenwissenschaftler_innen, Datenjournalist_innen und der breiten Masse diese Daten durch interaktive Echtzeitvisualisierung im Web zugänglich machen.</p><p>&nbsp;</p><h2>Links</h2><ul><li><a href=\"https://webigeo.alpinemaps.org/\">weBIGeo</a> (Online Demo, benötigt Chrome auf Desktop!)&nbsp;</li><li><a href=\"https://www.netidee.at/webigeo\">weBIGeo @ NetIdee</a> (Blog und andere Projektressourcen)&nbsp;</li><li><a href=\"https://github.com/weBIGeo\">weBIGeo @ GitHub</a> (Repository mit den Ergebnissen des NetIdee Projektes)&nbsp;</li><li><a href=\"https://github.com/AlpineMapsOrg\">AlpineMaps @ GitHub</a> (Repository mit WebGL-basiertem 3D Renderer und Basisbibliotheken)&nbsp;</li></ul>",
        "start_date": "2023-12-01",
        "end_date": "2024-11-30",
        "leader_id": 1110,
        "logo": {
            "name": "logo.png",
            "path": "project:d9555",
            "type": "image/png",
            "size": 8228091,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/d9555/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [],
        "research_areas": [
            "InfoVis",
            "Rendering"
        ],
        "publications": [
            "komon-2025-dco",
            "komon-2025-webigeo",
            "komon-kimmersdorfer-2025-weBIGeo",
            "muth-2026-clouds"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/weBIGeo",
        "__class": "Project"
    },
    {
        "id": "d4673",
        "workgroup_id": "vr",
        "drupal_id": 4673,
        "drupal_path": null,
        "name": "Smart automated check of BIM models with real buildings",
        "name_de": null,
        "short_title": null,
        "website": null,
        "status": "active",
        "short_abstract": null,
        "abstract": null,
        "start_date": "2021-09-01",
        "end_date": "2024-08-31",
        "leader_id": 378,
        "logo": null,
        "funding_organisations": [],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at",
        "__class": "Project"
    },
    {
        "id": "d9671",
        "workgroup_id": "vis",
        "drupal_id": 9671,
        "drupal_path": "/research/projects/Math2Model",
        "name": "Welten bauen mit Mathematik - Onlinetool zur parametrischen Echtzeitmodellierung",
        "name_de": "Welten bauen mit Mathematik - Onlinetool zur parametrischen Echtzeitmodellierung",
        "short_title": "Math2Model",
        "website": null,
        "status": "active",
        "short_abstract": null,
        "abstract": "<p>Wir sind Forscher an der TU Wien, die mittels Mathematik Bilder von 3D-Welten generieren wie in Videospielen oder Filmen. Technologischer Fortschritt ermöglicht es, die zugrundeliegende Geometrie solcher 3D-Welten nun auch online effizient mittels rein mathematischer Beschreibung zu erstellen. Generell ist das geplante Tool für alle, die sehen wollen, wie man schnell und einfach mit Hilfe von Mathematik Objekte oder Landschaften generieren kann. Neben diesem spielerischen/lehrenden Charakter kann das Tool von professionellen Spieleentwicklern, Architekten und 3D-Modellieren genutzt werden, um erste Entwürfe oder auch ganze 3D-Modelle zu generieren - alles ganz komfortabel online. Mathematische Modellierung kann in Desktopsoftware wie Rhino 6 (Grasshopper) oder auch mittels Blender (Geometry Nodes) durchgeführt werden. Allerdings erfordert dies die Installation der (teils kostenpflichtigen) Software und erheblichen Einarbeitungsaufwand. Bestehende Onlinetools arbeiten relativ langsam. Unser Onlinetool soll die Eintrittshürden senken und schnelle und einfache Modellierung aus mathematischen Formeln ermöglichen, wie man es sich von einem modernen Onlinetool erwartet. Mathematische Modellierung als Onlinetool war bisher aus technischer Sicht nur schwierig bzw. ineffizient zu lösen. Seit April 2023 hält allerdings eine neue Schnittstelle Einzug in moderne Browser, nämlich \"WebGPU\". Sie ermöglicht nun erstmalig das Ausführen sogenannter \"Compute Shader\" direkt im Browser, somit können allgemeine Berechnungen GPU-beschleunigt durchgeführt werden.</p>",
        "start_date": "2024-01-02",
        "end_date": "2024-07-01",
        "leader_id": 166,
        "logo": null,
        "funding_organisations": [],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/Math2Model",
        "__class": "Project"
    },
    {
        "id": "d4984",
        "workgroup_id": "rend",
        "drupal_id": 4984,
        "drupal_path": "/research/projects/Ecosys",
        "name": "Ecosystem Modeling Using Rendering Methods",
        "name_de": "Ecosystem Modeling Using Rendering Methods",
        "short_title": "Ecosys",
        "website": null,
        "status": "active",
        "short_abstract": null,
        "abstract": null,
        "start_date": "2022-07-01",
        "end_date": "2024-06-30",
        "leader_id": 193,
        "logo": null,
        "funding_organisations": [],
        "research_areas": [],
        "publications": [
            "Grossfurtner-2023-paego"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/Ecosys",
        "__class": "Project"
    },
    {
        "id": "d4388",
        "workgroup_id": "rend",
        "drupal_id": 4388,
        "drupal_path": "/research/projects/Images2Mesh-Web",
        "name": "Photogrammetry made easy",
        "name_de": "Photogrammetrie einfach gemacht",
        "short_title": "Images2Mesh Web",
        "website": null,
        "status": "active",
        "short_abstract": null,
        "abstract": "<p>Dieses Projekt richtet sich an alle Menschen, die gerne etwas in der echten Welt einscannen und anderen Menschen als 3D-Modell zeigen wollen. Beispielsweise könnte ein Artist unterwegs eine interessante Skulptur finden und diese in ein Spiel einbauen oder als Miniatur ausdrucken. Raumplaner könnten z.B. eine Kreuzung einscannen, nach ihren Plänen verändern und diese damit besser kommunizieren.</p>",
        "start_date": "2020-12-01",
        "end_date": "2024-03-31",
        "leader_id": 1395,
        "logo": {
            "name": "logo.png",
            "path": "project:d4388",
            "type": "image/png",
            "size": 1173196,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/d4388/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [],
        "research_areas": [
            "Geometry",
            "Modeling"
        ],
        "publications": [
            "erler_2024_ppsurf",
            "erler_philipp-2017-phd",
            "pichler_2022_fro",
            "steinschorn-2023-p2m"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/Images2Mesh-Web",
        "__class": "Project"
    },
    {
        "id": "d4710",
        "workgroup_id": "vr",
        "drupal_id": 4710,
        "drupal_path": null,
        "name": "Multi-User Mixed Reality System for flexible First Responder Training",
        "name_de": null,
        "short_title": null,
        "website": null,
        "status": "active",
        "short_abstract": null,
        "abstract": null,
        "start_date": "2021-11-01",
        "end_date": "2023-10-31",
        "leader_id": 378,
        "logo": null,
        "funding_organisations": [],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at",
        "__class": "Project"
    },
    {
        "id": "Superhumans",
        "workgroup_id": "rend",
        "drupal_id": 4166,
        "drupal_path": "/research/projects/Superhumans",
        "name": "Superhumans - Walking Through Walls",
        "name_de": null,
        "short_title": "Superhumans",
        "website": null,
        "status": "active",
        "short_abstract": "<p>In recent years, virtual and augmented reality have gained widespread attention because of newly developed head-mounted displays. For the first time, mass-market penetration seems plausible. Also, range sensors are on the verge of being integrated into smartphones, evidenced by prototypes such as the Google Tango device, making ubiquitous on-line acquisition of 3D data a possibility. The combination of these two technologies – displays and sensors – promises applications where users can directly be immersed into an experience of 3D data that was just captured live. However, the captured data needs to be processed and structured before being displayed. For example, sensor noise needs to be removed, normals need to be estimated for local surface reconstruction, etc. The challenge is that these operations involve a large amount of data, and in order to ensure a lag-free user experience, they need to be performed in real time, i.e., in just a few milliseconds per frame. In this proposal, we exploit the fact that dynamic point clouds captured in real time are often only relevant for display and interaction in the current frame and inside the current view frustum. In particular, we propose a new view-dependent data structure that permits efficient connectivity creation and traversal of unstructured data, which will speed up surface recovery, e.g. for collision detection. Classifying occlusions comes at no extra cost, which will allow quick access to occluded layers in the current view. This enables new methods to explore and manipulate dynamic 3D scenes, overcoming interaction methods that rely on physics-based metaphors like walking or flying, lifting interaction with 3D environments to a “superhuman” level.</p>\r\n",
        "abstract": "<p>In recent years, virtual and augmented reality have gained widespread attention because of newly developed head-mounted displays. For the first time, mass-market penetration seems plausible. Also, range sensors are on the verge of being integrated into smartphones, evidenced by prototypes such as the Google Tango device, making ubiquitous on-line acquisition of 3D data a possibility. The combination of these two technologies – displays and sensors – promises applications where users can directly be immersed into an experience of 3D data that was just captured live. However, the captured data needs to be processed and structured before being displayed. For example, sensor noise needs to be removed, normals need to be estimated for local surface reconstruction, etc. The challenge is that these operations involve a large amount of data, and in order to ensure a lag-free user experience, they need to be performed in real time, i.e., in just a few milliseconds per frame. In this proposal, we exploit the fact that dynamic point clouds captured in real time are often only relevant for display and interaction in the current frame and inside the current view frustum. In particular, we propose a new view-dependent data structure that permits efficient connectivity creation and traversal of unstructured data, which will speed up surface recovery, e.g. for collision detection. Classifying occlusions comes at no extra cost, which will allow quick access to occluded layers in the current view. This enables new methods to explore and manipulate dynamic 3D scenes, overcoming interaction methods that rely on physics-based metaphors like walking or flying, lifting interaction with 3D environments to a “superhuman” level.</p>\r\n",
        "start_date": "2019-09-01",
        "end_date": "2023-08-31",
        "leader_id": 948,
        "logo": {
            "name": "logo.jpg",
            "path": "project:Superhumans",
            "type": "image/jpeg",
            "size": 96578,
            "orig_name": "logo.jpg",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/Superhumans/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [
            {
                "id": "fwf",
                "contract_number": "P32418-N31",
                "comment": "332.780,70 €"
            }
        ],
        "research_areas": [
            "Geometry",
            "VR"
        ],
        "publications": [
            "erler_2024_ppsurf",
            "erler_philipp-2017-phd",
            "erler-2020-p2s",
            "leimer_2020-cag",
            "marin-2024-pcp",
            "parakkat-2024-ballmerge",
            "rait_alexius-2021-baa",
            "SCHUETZ-2020-MPC",
            "schuetz-2020-PPC"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/Superhumans",
        "__class": "Project"
    },
    {
        "id": "VHH",
        "workgroup_id": "vr",
        "drupal_id": 4160,
        "drupal_path": "/research/projects/VHH",
        "name": "Visual History of the Holocaust: Rethinking Curation in the Digital Age",
        "name_de": null,
        "short_title": "VHH",
        "website": null,
        "status": "active",
        "short_abstract": null,
        "abstract": null,
        "start_date": "2019-01-01",
        "end_date": "2022-12-31",
        "leader_id": 378,
        "logo": null,
        "funding_organisations": [
            {
                "id": "eu7",
                "contract_number": null,
                "comment": null
            }
        ],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/VHH",
        "__class": "Project"
    },
    {
        "id": "d4386",
        "workgroup_id": "rend",
        "drupal_id": 4386,
        "drupal_path": "/research/projects/LargeClouds2BIM",
        "name": "Efficient workflow transforming large 3D point clouds to Building Information Models with user-assisted automatization",
        "name_de": "Efficient workflow transforming large 3D point clouds to Building Information Models with user-assisted automatization",
        "short_title": "LargeClouds2BIM",
        "website": null,
        "status": "active",
        "short_abstract": null,
        "abstract": null,
        "start_date": "2020-12-01",
        "end_date": "2022-11-30",
        "leader_id": 193,
        "logo": null,
        "funding_organisations": [],
        "research_areas": [],
        "publications": [
            "SCHUETZ-2021-PCC",
            "SCHUETZ-2022-PCC",
            "SCHUETZ-2023-LOD",
            "SCHUETZ-2024-SIMLOD"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/LargeClouds2BIM",
        "__class": "Project"
    },
    {
        "id": "d4322",
        "workgroup_id": "vr",
        "drupal_id": 4322,
        "drupal_path": "/research/projects/VR-Tennis-Trainer",
        "name": "Virtual Reality Tennis Trainer",
        "name_de": null,
        "short_title": "VR Tennis Trainer",
        "website": null,
        "status": "active",
        "short_abstract": "<p style=\"text-align:justify; margin-bottom:11px\"><span style=\"font-size:11pt\"><span style=\"line-height:107%\"><span style=\"font-family:&quot;Calibri&quot;,sans-serif\">This research project focuses on 3D motion analysis and motion learning methodologies. We design novel methods for automated analysis of human motion by machine learning. These methods can be applicable in real training scenario or in VR training setup. The results of our motion analysis can help players better understand the errors in their motion and lead to improvement of motion performance. Our motion analysis methods are based on professional knowledge from tennis experts from our partner company <a href=\"https://www.vr-motion-learning.com/\" style=\"color:#0563c1; text-decoration:underline\">VR Motion Learning GmbH &amp; Co KG</a>. We use numerous motion features, including rotations, positions, velocities and others, to analyze the motion. </span></span></span></p>\r\n\r\n<p style=\"text-align:justify; margin-bottom:11px\"><span style=\"font-size:11pt\"><span style=\"line-height:107%\"><span style=\"font-family:&quot;Calibri&quot;,sans-serif\">Our goal is to use virtual reality as scenario for learning correct tennis technique that will be applicable in real tennis game. For this purpose, we plan to join our motion analysis with error visualization techniques in 3D and with novel motion learning methodologies. These methodologies may lead to learning correct sport technique, improvement of performance and prevention of injuries.</span></span></span></p>\r\n",
        "abstract": "<p style=\"text-align:justify; margin-bottom:11px\"><span style=\"font-size:11pt\"><span style=\"line-height:107%\"><span style=\"font-family:&quot;Calibri&quot;,sans-serif\">This research project focuses on 3D motion analysis and motion learning methodologies. We design novel methods for automated analysis of human motion by machine learning. These methods can be applicable in real training scenario or in VR training setup. The results of our motion analysis can help players better understand the errors in their motion and lead to improvement of motion performance. Our motion analysis methods are based on professional knowledge from tennis experts from our partner company <a href=\"https://www.vr-motion-learning.com/\" style=\"color:#0563c1; text-decoration:underline\">VR Motion Learning GmbH &amp; Co KG</a>. We use numerous motion features, including rotations, positions, velocities and others, to analyze the motion. </span></span></span></p>\r\n\r\n<p style=\"text-align:justify; margin-bottom:11px\"><span style=\"font-size:11pt\"><span style=\"line-height:107%\"><span style=\"font-family:&quot;Calibri&quot;,sans-serif\">Our goal is to use virtual reality as scenario for learning correct tennis technique that will be applicable in real tennis game. For this purpose, we plan to join our motion analysis with error visualization techniques in 3D and with novel motion learning methodologies. These methodologies may lead to learning correct sport technique, improvement of performance and prevention of injuries.</span></span></span></p>\r\n",
        "start_date": "2020-02-01",
        "end_date": "2022-10-31",
        "leader_id": 378,
        "logo": {
            "name": "logo.jpg",
            "path": "project:d4322",
            "type": "image/jpeg",
            "size": 237629,
            "orig_name": "logo.jpg",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/d4322/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [],
        "research_areas": [
            "VR"
        ],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/VR-Tennis-Trainer",
        "__class": "Project"
    },
    {
        "id": "d4307",
        "workgroup_id": "rend",
        "drupal_id": 4307,
        "drupal_path": "/research/projects/BIMstocks",
        "name": "Digital Urban Mining Platform: Assessing the material composition of building stocks through coupling of BIM to GIS",
        "name_de": "Digitale Urban Mining Plattform: Analyse der materiellen Zusammensetzung von bestehenden Gebäuden durch Kopplung von BIM und GIS",
        "short_title": "BIMstocks",
        "website": null,
        "status": "active",
        "short_abstract": null,
        "abstract": "<p>The strong population growth and urbanization are increasing the global resources and energy consumption. The AEC (architecture, engineering and construction) industry is responsible for 60% of the extracted raw materials and generates 40% of the energy-related CO<sub>2</sub> emissions. In Austria, the AEC sector is responsible for 70% of total annual waste – facts that are underlining the importance of implementing recycling strategies. The building stock has great potential to serve as raw material reservoir, however currently there is a lack of comprehensive knowledge about the actual building stock, which is the largest obstacle for reusing and recycling of materials and elements.</p> \n<p>The <strong>main goal</strong> of BIMstocks is to develop a method for a consistent digital documentation of the material composition of the existing building stock for modeling the secondary raw materials cadaster and prediction of the recycling potential, by creating a catalogue of BIM-Objects of typically Viennese buildings and follow-up generation of as-built BIM-Models, thus enabling an upscaling to city level. Analyzing and scanning of 10 different use cases, which will represent the variety of typical Viennese buildings, will enable the upscaling to city level. The final aim is to generate a GIS-based Urban Mining Platform, which embeds the obtained information of the use cases and predicts the recycling potential, the material flow and waste mass. Furthermore, a <strong>framework</strong> will be developed in order to enable the application of urban mining strategies. The framework should describe all individual steps as well as the applied methods.</p> \n<p>Thus, the project represents the continuation of the framework developed in the research project SCI_BIM, which investigated an integrated determination of geometry and material by coupling laser scanning and GPR technology for the semi-automated BIM-Model generation. SCI_BIM demonstrated that GPR technology needs further testing to a) apply it to different building structures and b) build-up a material database, which would significantly increase the efficiency of material determination.</p> \n<p>The <strong>innovation</strong> of the project is the coupling of different technologies, which enable upscaling from component-level to city-level: scanning technology using GPR, application of machine learning for the automated determination of material compositions, and predictive modelling at city-level in the digital urban mining platform. For the first time the uncertainties resulting from the use case samples, the measured values and the extrapolation are estimated. The intended result is to generate a building catalogue for typical Viennese buildings, which enables upscaling on city-level as well as embedding the components and buildings into the GIS-based Urban Mining Platform, based on GPR scans and subsequent machine learning algorithms.</p> \n<p><span style=\"color: black;\">The </span><strong style=\"color: black;\">main use</strong><span style=\"color: black;\"> of the obtained results from BIMstocks is the increase of recycling rates by applying urban mining strategies, for which the generated public urban mining platform serves as a basis.&nbsp;&nbsp;</span></p>",
        "start_date": "2020-10-01",
        "end_date": "2022-09-30",
        "leader_id": 193,
        "logo": null,
        "funding_organisations": [],
        "research_areas": [],
        "publications": [
            "gilmutdinov-2022-aomlbwug"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/BIMstocks",
        "__class": "Project"
    },
    {
        "id": "d4317",
        "workgroup_id": "vr",
        "drupal_id": 4317,
        "drupal_path": "/research/projects/BRIDGES",
        "name": "A hyBRID (physical-diGital) multi-user Extended reality platform as a stimulus for industry uptake of interactive technologieS",
        "name_de": null,
        "short_title": "BRIDGES",
        "website": null,
        "status": "active",
        "short_abstract": "<p>The BRIDGES project aims at “bridging” the gap between interactive technologies and industries by bringing XR to the real world!<br />\r\nOur mission is moving towards the “democratisation” of XR by delivering a flexible and scalable solution that can be easily integrated and customised to the needs of a variety of different stakeholders.</p>\r\n\r\n<p>For more information please refer to:</p>\r\n\r\n<p><a class=\"moz-txt-link-freetext\" href=\"https://www.bridges-horizon.eu/\">https://www.bridges-horizon.eu/</a></p>\r\n\r\n<p>&nbsp;</p>\r\n",
        "abstract": "<p>The BRIDGES project aims at “bridging” the gap between interactive technologies and industries by bringing XR to the real world!<br />\r\nOur mission is moving towards the “democratisation” of XR by delivering a flexible and scalable solution that can be easily integrated and customised to the needs of a variety of different stakeholders.</p>\r\n\r\n<p>For more information please refer to:</p>\r\n\r\n<p><a class=\"moz-txt-link-freetext\" href=\"https://www.bridges-horizon.eu/\">https://www.bridges-horizon.eu/</a></p>\r\n\r\n<p>&nbsp;</p>\r\n",
        "start_date": "2020-10-01",
        "end_date": "2022-09-30",
        "leader_id": 378,
        "logo": {
            "name": "logo.png",
            "path": "project:d4317",
            "type": "image/png",
            "size": 67733,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/d4317/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/BRIDGES",
        "__class": "Project"
    },
    {
        "id": "EVOCATION",
        "workgroup_id": "rend",
        "drupal_id": 4162,
        "drupal_path": "/research/projects/EVOCATION",
        "name": "Advanced Visual and Geometric Computing for 3D Capture, Display, and Fabrication",
        "name_de": null,
        "short_title": "EVOCATION",
        "website": null,
        "status": "active",
        "short_abstract": "This Marie-Curie project creates a leading European-wide doctoral college for research in Advanced Visual and Geometric\nComputing for 3D Capture, Display, and Fabrication.",
        "abstract": "The project will create a leading European-wide doctoral Collegium for research in Advanced Visual and Geometric Computing for 3D Capture, Display, and Fabrication (EVOCATION). The Collegium will train the next generation of creative, entrepreneurial and innovative experts who will be equipped with the necessary skills and competences to face current and future major challenges in scalable and high-fidelity geometry and material acquisition, extraction of structure and semantic information, processing, visualization, 3D display and 3D fabrication in professional and consumer applications. In the future, the ESRs will lead research and development of new visual and geometric computing methods in the widest variety of applications, ranging from industrial design to humanities, from medical training to urban assessment, and from creative industries to education methodologies. The EVOCATION network of public and private entities will be naturally multidisciplinary and multi-institutional and will: (a) promote, through domain-specific challenges, the culture of open science and multidisciplinary research applied to concrete problems of the real world, in strict cooperation with end users in engineering, science and humanities; (b) advance the state-of-the-art in geometry and material acquisition, geometry processing and semantic feature extraction, interactive visualization, computational fabrication, and high-bandwidth/3D display systems; (c) bridge complementary approaches for cost-effective data digitization, visualization, fabrication, and display through the integration of different methodologies in the 3D capture, processing and fabrication pipeline; (d) demonstrate the feasibility and efficiency of scalable cost-effective end-to-end techniques to virtually and physically capture and create objects with complex shape and appearance; (e) increase awareness of the benefits of advanced visual/geometric computing technology in both professional and consumer domains.",
        "start_date": "2018-10-01",
        "end_date": "2022-09-30",
        "leader_id": 193,
        "logo": {
            "name": "logo.png",
            "path": "project:EVOCATION",
            "type": "image/png",
            "size": 34154,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/EVOCATION/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [
            {
                "id": "Horizon 2020",
                "contract_number": "ITN 813170",
                "comment": null
            }
        ],
        "research_areas": [
            "Fabrication",
            "Geometry",
            "Modeling",
            "Rendering"
        ],
        "publications": [
            "cardoso-2021-cost",
            "cardoso-2022-rtpercept",
            "cardoso-2024-r-c",
            "cardoso-thesis",
            "celarek_adam-2019-qelta",
            "celarek-2022-gmcn",
            "FRAISS-2022-CGMM",
            "hanko-2019-ani",
            "richter-2020-ani",
            "thurner-2019-ani",
            "wieser-2019-ani"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/EVOCATION",
        "__class": "Project"
    },
    {
        "id": "BimFlexi",
        "workgroup_id": "vr",
        "drupal_id": 4164,
        "drupal_path": "/research/projects/BimFlexi",
        "name": "BIM-based digital Plattform for design and optimisation of flexible facilities for Industry 4.0",
        "name_de": null,
        "short_title": "BimFlexi",
        "website": null,
        "status": "active",
        "short_abstract": "<p>Industrial Building Design is a design process where the successful implementation of each project is based on collaborative decision making of multiple domain specialists - architects, engineers, production system planners and building owners. Traditionally, such multi-collaborator workflows are subject to conflicting stakeholder goals and frequent changes in production processes inevitably resulting in lengthy planning periods. This particular design process needs novel approaches to decision-making support which would combine the ability to communicate design intent with real-time feedback on the impact of design decisions.<br />\r\nBimFlexi project aims to accelerate BIM design processes for industrial buildings by using parametric modelling, multi-parameter optimization and collaborative VR exploration and modification of models at early stages of building planning.</p>\r\n",
        "abstract": "<p>Industrial Building Design is a design process where the successful implementation of each project is based on collaborative decision making of multiple domain specialists - architects, engineers, production system planners and building owners. Traditionally, such multi-collaborator workflows are subject to conflicting stakeholder goals and frequent changes in production processes inevitably resulting in lengthy planning periods. This particular design process needs novel approaches to decision-making support which would combine the ability to communicate design intent with real-time feedback on the impact of design decisions.<br />\r\nBimFlexi project aims to accelerate BIM design processes for industrial buildings by using parametric modelling, multi-parameter optimization and collaborative VR exploration and modification of models at early stages of building planning.</p>\r\n",
        "start_date": "2020-03-01",
        "end_date": "2022-08-31",
        "leader_id": 378,
        "logo": {
            "name": "logo.png",
            "path": "project:BimFlexi",
            "type": "image/png",
            "size": 66178,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/BimFlexi/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [
            {
                "id": "FFF",
                "contract_number": null,
                "comment": null
            }
        ],
        "research_areas": [
            "VR"
        ],
        "publications": [
            "podkosova_2022_bimflexi-vr",
            "Reisinger_Julia-2021-parametricscript",
            "Reisinger-2021-JOBE"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/BimFlexi",
        "__class": "Project"
    },
    {
        "id": "d4400",
        "workgroup_id": "vr",
        "drupal_id": 4400,
        "drupal_path": "/research/projects/DENOISING",
        "name": "Denoising for Real-Time Ray Tracing",
        "name_de": null,
        "short_title": "DENOISING",
        "website": null,
        "status": "active",
        "short_abstract": null,
        "abstract": "<p>The project aims at leveraging specific image generation processes to improve quality and speed of denoise algorithms on mobile platform.</p>",
        "start_date": "2021-01-01",
        "end_date": "2022-07-31",
        "leader_id": 378,
        "logo": null,
        "funding_organisations": [],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/DENOISING",
        "__class": "Project"
    },
    {
        "id": "d4713",
        "workgroup_id": "vr",
        "drupal_id": 4713,
        "drupal_path": "/research/projects/Zoll4D",
        "name": "Detection of Vehicle Hiding Places with Augmented Reality",
        "name_de": null,
        "short_title": "Zoll4D",
        "website": null,
        "status": "active",
        "short_abstract": null,
        "abstract": null,
        "start_date": "2021-12-01",
        "end_date": "2022-06-30",
        "leader_id": 378,
        "logo": null,
        "funding_organisations": [],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/Zoll4D",
        "__class": "Project"
    },
    {
        "id": "3DSpatialization",
        "workgroup_id": "rend",
        "drupal_id": 4156,
        "drupal_path": "/research/projects/3DSpatialization",
        "name": "Smart Communities and Technologies: 3D Spatialization",
        "name_de": null,
        "short_title": "3DSpatialization",
        "website": "https://smartct.tuwien.ac.at/",
        "status": "active",
        "short_abstract": "<p>The Research Cluster \"Smart Communities and Technologies\" (Smart CT) at TU Wien will provide the scientific underpinnings for next-generation complex smart city and communities infrastructures. Cities are ever-evolving, complex cyber physical systems of systems covering a magnitude of different areas. The initial concept of smart cities and communities started with cities utilizing communication technologies to deliver services to their citizens and evolved to using information technology to be smarter and more efficient about the utilization of their resources. In recent years however, information technology has changed significantly, and with it the resources and areas addressable by a smart city have broadened considerably. They now cover areas like smart buildings, smart products and production, smart traffic systems and roads, autonomous driving, smart grids for managing energy hubs and electric car utilization or urban environmental systems research.</p>\r\n\r\n<p>3D spatialization creates the link between the internet of cities infrastructure and the actual 3D world in which a city is embedded in order to perform advanced computation and visualization tasks. Sensors, actuators and users are embedded in a complex 3D environment that is constantly changing. Acquiring, modeling and visualizing this dynamic 3D environment are the challenges we need to face using methods from Visual Computing and Computer Graphics. 3D Spatialization aims to make a city aware of its 3D environment, allowing it to perform spatial reasoning to solve problems like visibility, accessibility, lighting, and energy efficiency.</p>\r\n",
        "abstract": "<p>The Research Cluster \"Smart Communities and Technologies\" (Smart CT) at TU Wien will provide the scientific underpinnings for next-generation complex smart city and communities infrastructures. Cities are ever-evolving, complex cyber physical systems of systems covering a magnitude of different areas. The initial concept of smart cities and communities started with cities utilizing communication technologies to deliver services to their citizens and evolved to using information technology to be smarter and more efficient about the utilization of their resources. In recent years however, information technology has changed significantly, and with it the resources and areas addressable by a smart city have broadened considerably. They now cover areas like smart buildings, smart products and production, smart traffic systems and roads, autonomous driving, smart grids for managing energy hubs and electric car utilization or urban environmental systems research.</p>\r\n\r\n<p>3D spatialization creates the link between the internet of cities infrastructure and the actual 3D world in which a city is embedded in order to perform advanced computation and visualization tasks. Sensors, actuators and users are embedded in a complex 3D environment that is constantly changing. Acquiring, modeling and visualizing this dynamic 3D environment are the challenges we need to face using methods from Visual Computing and Computer Graphics. 3D Spatialization aims to make a city aware of its 3D environment, allowing it to perform spatial reasoning to solve problems like visibility, accessibility, lighting, and energy efficiency.</p>\r\n",
        "start_date": "2018-02-01",
        "end_date": "2021-12-31",
        "leader_id": 193,
        "logo": {
            "name": "logo.png",
            "path": "project:3DSpatialization",
            "type": "image/png",
            "size": 134233,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/3DSpatialization/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [],
        "research_areas": [
            "Geometry",
            "Modeling",
            "Rendering"
        ],
        "publications": [
            "Arleo-2019-vis",
            "celarek-2022-gmcn",
            "gamsjaeger-2022-proc",
            "HANN_2022_IPT",
            "horvath_imp",
            "kenzel_michael_2021_cuda",
            "kerbl_2019_planet_poster",
            "kerbl_2021_hdg",
            "kerbl-2020-improvencoding",
            "kerbl-2022-cuda",
            "kerbl-2022-trienc",
            "kristmann-2022-occ",
            "murturi_PGG",
            "pernsteinre_jakob_2020_eechc",
            "roth_2021_vdst",
            "roth_vdi",
            "rumpelnik_martin_2020_PRM",
            "Rumpelnik_Martin-2020-NPR",
            "SCHUETZ-2021-PCC",
            "stappen_SteFAS",
            "tatzgern-2020-sst",
            "unterguggenberger-2020-fmvr",
            "unterguggenberger-2021-msh",
            "unterguggenberger-2022-vulkan"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/3DSpatialization",
        "__class": "Project"
    },
    {
        "id": "d4958",
        "workgroup_id": "rend",
        "drupal_id": 4958,
        "drupal_path": "/research/projects/GS_Buildings",
        "name": "Green and Smart Buildings: Solar Irradiation Analysis for Early Design Phases",
        "name_de": "Green and Smart Buildings: Solar Irradiation Analysis for Early Design Phases",
        "short_title": "G&S_Buildings",
        "website": null,
        "status": "active",
        "short_abstract": null,
        "abstract": "<p>The main objective of this project is to demonstrate a novel approach to solve the problem of irradiation analysis in architectural design significantly faster than existing methods. This speed increase in turn enables new design workflows in architectural design. It will enable the designer to reduce the expected energy consumption of the designed building. More generally we will show that a difficult problem, previously viewed as a simulation problem, can be addressed utilizing the technology and techniques from computer graphics, specifically GPU accelerated rendering. </p>",
        "start_date": "2021-01-01",
        "end_date": "2021-09-30",
        "leader_id": 193,
        "logo": null,
        "funding_organisations": [],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/GS_Buildings",
        "__class": "Project"
    },
    {
        "id": "geo-materials",
        "workgroup_id": null,
        "drupal_id": 4187,
        "drupal_path": "/research/projects/geo-materials",
        "name": "Computational Design of Geometric Materials",
        "name_de": null,
        "short_title": "geo-materials",
        "website": null,
        "status": "active",
        "short_abstract": "In this project we want to research novel materials whose mechanical behavior is described by the complexity of their geometry. Such “geometric materials” are cellular structures whose properties depend on the shape and the connectivity of their cells, while the actual physical substance they are built of is constant across the entire object. ",
        "abstract": "<p>In this project we want to research novel materials whose mechanical behavior is described by the complexity of their geometry. Such “geometric materials” are cellular structures whose properties depend on the shape and the connectivity of their cells, while the actual physical substance they are built of is constant across the entire object. Our goal is to develop the first computational model for the analysis and simulation of complex geometric materials, as well as a so-called goal-based computational design framework for their synthesis. Goal-based means that the desired behavior can be specified a-priori by the designer, and an appropriate geometric structure that best approximates the given goals is computed automatically. Our main research problem is how to map mechanical properties to geometric connections of cellular structures.</p>\r\n",
        "start_date": "2016-03-01",
        "end_date": "2021-08-31",
        "leader_id": 844,
        "logo": {
            "name": "logo.png",
            "path": "project:geo-materials",
            "type": "image/png",
            "size": 298337,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/geo-materials/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [
            {
                "id": "WWTF",
                "contract_number": null,
                "comment": null
            }
        ],
        "research_areas": [],
        "publications": [
            "leimer_2020-cag",
            "leimer-2018-sar"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/geo-materials",
        "__class": "Project"
    },
    {
        "id": "Wohnen 4.0",
        "workgroup_id": "rend",
        "drupal_id": 4168,
        "drupal_path": "/research/projects/Wohnen-40",
        "name": "Wohnen 4.0 - Digital Platform for Affordable Housing",
        "name_de": null,
        "short_title": "Wohnen 4.0",
        "website": null,
        "status": "active",
        "short_abstract": "<p>This is a joint project with the civil engineering faculty and several companies. Its aim is the development of an Integrated Framework “Housing 4.0”; a digital platform supporting integrated planning and project delivery through coupling various digital tools and databases, like Building Information Modeling (BIM) for Design to Production&nbsp; and Parametric Habitat Designer.</p>\r\n\r\n<p>Our goal is to exploit the potential of BIM for modular, off-site housing assembly in order to improve planning and construction processes, reduce cost and construction time and allow for mass customization will be explored.</p>\r\n\r\n<p>The novel approach in this project is user-involvement; which has been neglected in recent national and international projects on off-site, modular construction supported by digital technologies. A parametric design tool should allow different stakeholders to explore both high-level and low-level options and their impact on the construction project so that mutually optimal solutions can be found easier.</p>\r\n",
        "abstract": "<p>This is a joint project with the civil engineering faculty and several companies. Its aim is the development of an Integrated Framework “Housing 4.0”; a digital platform supporting integrated planning and project delivery through coupling various digital tools and databases, like Building Information Modeling (BIM) for Design to Production&nbsp; and Parametric Habitat Designer.</p>\r\n\r\n<p>Our goal is to exploit the potential of BIM for modular, off-site housing assembly in order to improve planning and construction processes, reduce cost and construction time and allow for mass customization will be explored.</p>\r\n\r\n<p>The novel approach in this project is user-involvement; which has been neglected in recent national and international projects on off-site, modular construction supported by digital technologies. A parametric design tool should allow different stakeholders to explore both high-level and low-level options and their impact on the construction project so that mutually optimal solutions can be found easier.</p>\r\n",
        "start_date": "2019-09-01",
        "end_date": "2021-08-31",
        "leader_id": 193,
        "logo": null,
        "funding_organisations": [
            {
                "id": "FFF",
                "contract_number": null,
                "comment": null
            }
        ],
        "research_areas": [
            "Modeling"
        ],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/Wohnen-40",
        "__class": "Project"
    },
    {
        "id": "illvisation",
        "workgroup_id": "vis",
        "drupal_id": 4171,
        "drupal_path": "/research/projects/illvisation",
        "name": "Visual Computing: Illustrative Visualization",
        "name_de": null,
        "short_title": "illvisation",
        "website": null,
        "status": "active",
        "short_abstract": "<p>The central focus of our research is to understand visual abstraction. Understanding means 1. to identify meaningful visual abstractions, 2. to assess their effectiveness for human perception and cognition and 3. to formalize them to be executable on a computational machinery. The outcome of the investigation is useful for designing visualizations for a given scenario or need, whose effectiveness can be quantified and thus the most understandable visualization design can be effortlessly determined. The science of visualization has already gained some understanding of structural visual abstraction. When for example illustrators, artists, and visualization designers convey certain structure, or visually express how things look, we can often provide a scientifically-founded argument whether and why is their expression effective for human cognitive processing. What has not been given sufficient scientific attention to, is advancing the understanding of procedural visual abstraction, in other words investigating visual means that convey what things do or how things work. This missing piece of knowledge would be very useful for visual depiction of processes and dynamics that are omnipresent in science, technology, but also in our everyday lives. The upcoming project will therefore investigate theoretical foundations for visualization of processes. Physiological processes that describe the complex machinery of biological life will be picked as a target scenario. The reason for this choice is two-fold. Firstly, these processes are immensely complex, are carried-out on various spatial and temporal levels simultaneously, and can be sufficiently understood only if all scales are considered. Secondly, physiological processes have been modeled as a result of intensive research in biology, systems biology, and biochemistry and are available in a form of digital data. 
The goal will be to visually communicate how physiological processes participate on life by considering the limitations of human perceptual and cognitive capabilities. By solving individual visualization problems of this challenging target scenario, the research will provide first pieces of understanding of procedural visual abstractions that are generally applicable, beyond the chosen target domain. Prototype implementation of the developed technology is available at the GitHub repository: https://github.com/illvisation/</p>\r\n\r\n<p>Prototype implementation of the developed technology is available at the GitHub repository:<br />\r\nhttps://github.com/illvisation/</p>\r\n\r\n<h2><a name=\"General Information\"></a>cellVIEW</h2>\r\n\r\n<p>cellVIEW is a new tool that provides fast rendering of very large biological macromolecular scenes and is inspired by state-of-the-art computer graphics techniques. Click <a href=\"https://www.cg.tuwien.ac.at/research/projects/illvisation/cellview/cellview.php\">here</a> for additional information.</p>\r\n\r\n<h2><a name=\"General Information\"></a>Invited Talks</h2>\r\n\r\n<p>18.11.2016: <a href=\"https://www.cg.tuwien.ac.at/research/projects/illvisation/talks/Envisioning_the_Visible_Molecular_Cell-Part1\">Arthur J. 
Olson, Envisioning the Visible Molecular Cell</a><br />\r\n17.10.2016: Kwan-Liu Ma, Emerging Topics for Visualization Research: <a href=\"https://www.cg.tuwien.ac.at/research/projects/illvisation/talks/Kwan-Liu_Ma/Kwan-Liu Ma_Talk_part1\">Part1</a>, <a href=\"https://www.cg.tuwien.ac.at/research/projects/illvisation/talks/Kwan-Liu_Ma/Kwan-Liu Ma_Talk_part2\">Part2</a><br />\r\n07.10.2016: <a href=\"https://www.cg.tuwien.ac.at/research/projects/illvisation/talks/Marc_Streit/streit\">Marc Streit, From Visual Exploration to Storytelling and Back Again</a><br />\r\n04.12.2015: <a href=\"https://www.cg.tuwien.ac.at/research/projects/illvisation/talks/Jan_Palacek/palecek\">Jan Palacek, Visual Analysis of Protein Complexes: From Protein Interaction to Cellular Processes</a><br />\r\n19.04.2013: <a href=\"https://www.cg.tuwien.ac.at/research/projects/illvisation/talks/Jan_Koenderink/LS_IllVis_01_Koenderink_Shape_in_visual_awareness\">Jan Koenderink, Shape in Visual Awareness</a></p>\r\n",
        "abstract": "<p>The central focus of our research is to understand visual abstraction. Understanding means 1. to identify meaningful visual abstractions, 2. to assess their effectiveness for human perception and cognition and 3. to formalize them to be executable on a computational machinery. The outcome of the investigation is useful for designing visualizations for a given scenario or need, whose effectiveness can be quantified and thus the most understandable visualization design can be effortlessly determined. The science of visualization has already gained some understanding of structural visual abstraction. When for example illustrators, artists, and visualization designers convey certain structure, or visually express how things look, we can often provide a scientifically-founded argument whether and why is their expression effective for human cognitive processing. What has not been given sufficient scientific attention to, is advancing the understanding of procedural visual abstraction, in other words investigating visual means that convey what things do or how things work. This missing piece of knowledge would be very useful for visual depiction of processes and dynamics that are omnipresent in science, technology, but also in our everyday lives. The upcoming project will therefore investigate theoretical foundations for visualization of processes. Physiological processes that describe the complex machinery of biological life will be picked as a target scenario. The reason for this choice is two-fold. Firstly, these processes are immensely complex, are carried-out on various spatial and temporal levels simultaneously, and can be sufficiently understood only if all scales are considered. Secondly, physiological processes have been modeled as a result of intensive research in biology, systems biology, and biochemistry and are available in a form of digital data. 
The goal will be to visually communicate how physiological processes participate on life by considering the limitations of human perceptual and cognitive capabilities. By solving individual visualization problems of this challenging target scenario, the research will provide first pieces of understanding of procedural visual abstractions that are generally applicable, beyond the chosen target domain. Prototype implementation of the developed technology is available at the GitHub repository: https://github.com/illvisation/</p>\r\n\r\n<p>Prototype implementation of the developed technology is available at the GitHub repository:<br />\r\nhttps://github.com/illvisation/</p>\r\n\r\n<h2><a name=\"General Information\"></a>cellVIEW</h2>\r\n\r\n<p>cellVIEW is a new tool that provides fast rendering of very large biological macromolecular scenes and is inspired by state-of-the-art computer graphics techniques. Click <a href=\"https://www.cg.tuwien.ac.at/research/projects/illvisation/cellview/cellview.php\">here</a> for additional information.</p>\r\n\r\n<h2><a name=\"General Information\"></a>Invited Talks</h2>\r\n\r\n<p>18.11.2016: <a href=\"https://www.cg.tuwien.ac.at/research/projects/illvisation/talks/Envisioning_the_Visible_Molecular_Cell-Part1\">Arthur J. 
Olson, Envisioning the Visible Molecular Cell</a><br />\r\n17.10.2016: Kwan-Liu Ma, Emerging Topics for Visualization Research: <a href=\"https://www.cg.tuwien.ac.at/research/projects/illvisation/talks/Kwan-Liu_Ma/Kwan-Liu Ma_Talk_part1\">Part1</a>, <a href=\"https://www.cg.tuwien.ac.at/research/projects/illvisation/talks/Kwan-Liu_Ma/Kwan-Liu Ma_Talk_part2\">Part2</a><br />\r\n07.10.2016: <a href=\"https://www.cg.tuwien.ac.at/research/projects/illvisation/talks/Marc_Streit/streit\">Marc Streit, From Visual Exploration to Storytelling and Back Again</a><br />\r\n04.12.2015: <a href=\"https://www.cg.tuwien.ac.at/research/projects/illvisation/talks/Jan_Palacek/palecek\">Jan Palacek, Visual Analysis of Protein Complexes: From Protein Interaction to Cellular Processes</a><br />\r\n19.04.2013: <a href=\"https://www.cg.tuwien.ac.at/research/projects/illvisation/talks/Jan_Koenderink/LS_IllVis_01_Koenderink_Shape_in_visual_awareness\">Jan Koenderink, Shape in Visual Awareness</a></p>\r\n",
        "start_date": "2013-01-01",
        "end_date": "2020-12-31",
        "leader_id": 171,
        "logo": {
            "name": "logo.png",
            "path": "project:illvisation",
            "type": "image/png",
            "size": 58061,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/illvisation/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [
            {
                "id": "eu7",
                "contract_number": "PCIG13-GA-2013-618680",
                "comment": null
            },
            {
                "id": "WWTF",
                "contract_number": "VRG11-010",
                "comment": null
            }
        ],
        "research_areas": [
            "BioVis",
            "IllVis"
        ],
        "publications": [
            "2013_Viola_Ivan_2013_MTS",
            "2017-kouril-illcell",
            "bernhard-2014-GTOM",
            "bernhard-2016-gft",
            "birkeland_aasmund_2014_pums",
            "boesch_2014_browserHistoryVis",
            "cellVIEW_2015",
            "Cornel2016CFM",
            "Diehl_2015",
            "dietrich-2016-viseq2",
            "dworschak-2016-szcm",
            "Gadllah_Hani_2016",
            "gangl-2018-gpsart",
            "Gehrer_Daniel_CUI",
            "Gehrer-2017-molmach",
            "glinzner-2016-tex",
            "Groeller_2016_I7",
            "horvath-2018-ism",
            "kapferer-2017-godot",
            "koch-2018-sso",
            "koehle-2013-sgv",
            "kolesar-ivan-2014-polymers",
            "kouril-2015-maya2cellview",
            "Krone2016VABC",
            "Langer_Maximillian_DMV",
            "lemuzic_2015_timelapse",
            "lemuzic-2014-ivm",
            "lemuzic-2015-hiv",
            "lemuzic-mindek-2016-viseq",
            "lemuzic-mindek-sorger-cgfcc",
            "lipp-2017-mgpu",
            "lipp-2017-vulkan",
            "miao_tvcg_2017",
            "miao_tvcg_2018",
            "mindek-2015-mc",
            "mindek-2016-utah-talk",
            "mindek-2017-dsn",
            "mindek-2017-marion",
            "mindek-2018-fyi",
            "mindek-2019-mci",
            "moerth-2018-tpose",
            "Plank_Pascal_2015_HVP",
            "plank-2017-sldg",
            "puenguentzky-2014-ht",
            "rasch-2016-imgses",
            "Reisacher_Matthias_CPW",
            "Reisacher2016",
            "Sbardellati-2019-vcbm",
            "seyfert-2017",
            "sifuentes-2018-crowd",
            "Solteszova2016",
            "sorger-2016-fowardabstraction",
            "textures-3d-printing",
            "vad-2016-bre",
            "Viola_2013_IDV",
            "Viola_Ivan_2013_CAI",
            "Viola_Ivan_2013_D3D",
            "Viola_Ivan_2013_DC",
            "Viola_Ivan_2013_GS",
            "Viola_Ivan_2013_HQ3",
            "Viola_Ivan_2013_RMA",
            "Viola_Ivan_2013_RSb",
            "Viola_Ivan_2013_SVA",
            "Viola_Ivan_2013_VCA",
            "Viola_Ivan_2015_AAM",
            "Viola_Ivan_2015_MCT",
            "Viola_Ivan_2015_VBS",
            "Viola_Ivan_CLD",
            "Viola_Ivan_DAC",
            "Viola_Ivan_IIP",
            "Viola_Ivan_UVP",
            "Viola_Ivan_VDP",
            "viola-evr",
            "Waldin_Nicholas_2017_FlickerObserver",
            "waldin-2017-thesis",
            "waldner-2013-facetCloudsGI",
            "waldner-2013-ubiWM",
            "waldner-2014-af",
            "waldner-2014-ghi",
            "wu-2019-bmc"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/illvisation",
        "__class": "Project"
    },
    {
        "id": "ShapeAcquisition",
        "workgroup_id": "rend",
        "drupal_id": 4173,
        "drupal_path": "/research/projects/ShapeAcquisition",
        "name": "Real-Time Shape Acquisition with Sensor-Specific Precision",
        "name_de": null,
        "short_title": "ShapeAcquisition",
        "website": null,
        "status": "active",
        "short_abstract": "Acquiring shapes of physical objects in real time and with guaranteed precision to the noise model of the sensor devices.",
        "abstract": "<p>The core idea in this project is to capture the shape of physical objects in real time, with guaranteed precision, and to reconstruct the shape boundaries with minimal geometry. An example application is to let untrained users acquire shapes using emerging mobile sensing devices such as Google’s Project Tango. The user moves the sensor around the object, guided by immediate visual feedback on the input sampling quality. The output is a topologically clean mesh consisting of just the vertices required to represent its features to the desired approximation. The real-time reconstruction enables numerous geometry-processing applications to be taken online, such as shape retrieval/matching, harvesting real-world geometry into a cloud, perspective photo correction, interactive modeling, augmented reality, physics simulation, or fabrication.</p>\r\n",
        "start_date": "2015-12-01",
        "end_date": "2020-11-30",
        "leader_id": 193,
        "logo": {
            "name": "logo.jpg",
            "path": "project:ShapeAcquisition",
            "type": "image/jpeg",
            "size": 20004,
            "orig_name": "logo.jpg",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/ShapeAcquisition/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [
            {
                "id": "fwf",
                "contract_number": "P24600-N23",
                "comment": null
            }
        ],
        "research_areas": [
            "Geometry"
        ],
        "publications": [
            "erler_2024_ppsurf",
            "erler_philipp-2017-phd",
            "erler-2020-p2s",
            "forsythe-2016-ccm",
            "grossmann-2016-baa",
            "gruber_horst-2019-baa",
            "koeppel-2016-baa",
            "novak_martin-2018-baa",
            "ohrhallinger_stefan-2018-cgf",
            "ohrhallinger_stefan-2018-pg",
            "ohrhallinger-2016-sgp",
            "pointner_michael-2017-baa",
            "Radwan_2021_Occ",
            "Radwan-2017-Occ",
            "reinwald-2017-baa",
            "schaffarz_2018-pra",
            "schartmueller_2018_baa"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/ShapeAcquisition",
        "__class": "Project"
    },
    {
        "id": "PathSpace",
        "workgroup_id": "rend",
        "drupal_id": 4175,
        "drupal_path": "/research/projects/PathSpace",
        "name": "Path-Space Manifolds for Noise-Free Light Transport",
        "name_de": null,
        "short_title": "PathSpace",
        "website": null,
        "status": "active",
        "short_abstract": "The project aims to develop new statistical and algorithmic methods to improve light-transport simulation for offline rendering.",
        "abstract": "The synthesis of photorealistic images has always been a major challenge in the field of computer graphics and is of great relevance for many applications such as motion picture production, computer games and architectural lighting simulations. Despite our detailed knowledge behind the physical processes of light transport, accurate and efficient simulation is still a challenge and subject to a vast amount of research work. This is due to the fact that light transport is extremely intricate: a single photon, emitted from a particular light source, can interact many times with various materials until it finally reaches the human eye.\n\nCurrent state-of-the-art methods in the field of photorealistic rendering rely on Monte Carlo integration of the incident radiance. As opposed to traditional physical applications, where the solution emerges as the result of one Monte Carlo process, photorealistic rendering is, by principle, different: a separate Monte Carlo process is run for every pixel, resulting in visually unpleasant images. We show that in order to completely eliminate the noise, one either has to resort to rendering an enormous amount of samples per pixel (up to the order of hundreds of thousands) or use noise filtering, where we show that as of now, all methods have their inherent drawbacks.\n\nHowever, our experiments and experience have led us to two key insights that will open up the road towards noise-free photorealistic rendering:\n\nOur first key insight is that the most difficult light-transport phenomena usually exist on a low-dimensional manifold that can be specifically sampled exhaustively, resulting in images that are completely noise-free with respect to these difficult light-transport situations.\n\nOur second key insight is that noise filtering is a viable way to mitigate noise for simple light transport effects, such as low-frequency illumination on diffuse surfaces. 
\n\nConsequently, the goal of this project is to develop a fundamentally new method that it combines the approaches of (1) exhaustively sampling special path-space manifolds and (2) applying noise filtering to the remaining space. To this end, we will provide a mathematical model to quantify the effectiveness of modern noise-filtering methods.\n\nMore concretely, the project will make the following contributions to photorealistic rendering:\n\n(1) An analysis and visualization of the space of light paths to build a better understanding how most state-of-the-art algorithms can be improved to perform better in challenging light-transport situations.\n(2) A new class of algorithms that are capable of rendering the most difficult light-transport situations effectively in a fundamentally new way that guarantees that these phenomena always appear completely smooth and converged.\n\n(3) A new way of combining noise-filtering techniques with photorealistic rendering algorithms, where each technique is used for the subclass of light paths where it is the most useful.\n",
        "start_date": "2015-11-01",
        "end_date": "2020-10-31",
        "leader_id": 193,
        "logo": {
            "name": "logo.jpg",
            "path": "project:PathSpace",
            "type": "image/jpeg",
            "size": 1534101,
            "orig_name": "logo.jpg",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/PathSpace/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [
            {
                "id": "fwf",
                "contract_number": "P27974",
                "comment": null
            }
        ],
        "research_areas": [
            "Rendering"
        ],
        "publications": [
            "CORNEL-2017-FRS",
            "SAKAI-2015-EAPI",
            "zsolnai-2018-gms",
            "zsolnai-feher-thesis-2019",
            "zsolnaifeher-2019-pme",
            "zsolnaifeher-2020-pme"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/PathSpace",
        "__class": "Project"
    },
    {
        "id": "AR-AQ-Bau",
        "workgroup_id": "vr",
        "drupal_id": 4177,
        "drupal_path": "/research/projects/AR-AQ-Bau",
        "name": "Use of Augmented Reality for Building Inspection and Quality Assurance on Construction Sites",
        "name_de": null,
        "short_title": "AR-AQ-Bau",
        "website": null,
        "status": "active",
        "short_abstract": "The aim of this research project is the development of a construction site-suitable augmented reality (AR) system included a Remote-Expert-System and a BIM-Closed-Loop data transfer system for improving the quality of construction, building security and energy efficiency as well as increasing the efficiency of construction investigation.",
        "abstract": "The aim of this research project is the development of a construction site-suitable augmented reality (AR) system for improving the quality of construction, building security and energy efficiency as well as increasing the efficiency of construction investigation. The potential and the requirements of AR in the phases of construction and operation should be determined. \nThis is relevant for everyone involved in the construction process. The project team combines research expertise from construction process management, augmented reality, BIM modeling and international engineering experience and will test the developments on renowned construction projects (incl. Future Art Lab and University Hospital St. Pölten).\n\n<b>Current situation in the construction industry</b>\nThe construction industry has been one of the least digitized industries so far. At present, progress evaluation, functional tests, and investigation of deficiencies is still mainly done manually by paper and e-mails. A special case is the installation (HVAC), which has become increasingly complex and now accounts for up to 35 % of the construction costs of buildings. The AR-AQ-Bau project therefore focuses on the acceptance of HVAC-relevant systems and can in future be extended to any areas under construction.\n\n<b>Objectives of the AR system</b>\nThe AR-AQ-Bau project develops an advanced AR system for the acceptance control of energy-relevant systems in the field of HVAC. The starting point is the BIM model. All information in the BIM model should be available to all parties in the construction process and should be kept up-to-date in a closed loop approach for the first time. Through this “closed loop data communication”, construction progress and investigation on the construction site can be marked in the AR model and thus kept up-to-date. 
The project focuses on interaction systems in order to transfer comments, images, thermal image recordings and information of new components into the AR model and then to transfer it into the BIM model (“closed loop”). This makes this information visible to everyone involved in the building process. The thermal imager allows function control and detection of heat losses in the installation system. With the new Remote Expert system external experts can be connected to the AR system to support the construction investigation. The experts see the same AR model and can insert instructions in the AR model for the people on the construction site. These features increase the energy efficiency of the completed structures, as many faults are only discovered with these features. Another challenge in the project is the reliable tracking of the AR models in construction site environment. The currently existing tracking systems cannot cope with the difficult conditions on construction sites and must be adapted.\n\n<b>Results and findings</b>\nThe result of the project is the AR acceptance and quality assurance system for construction sites with the features described above. The findings from the investigations serve as the basis for the economic use of AR on construction site. Another possible application is the future use in the remote maintenance of the supply network of Wiener Netze (supporting partner) to improve the energy efficiency of the urban infrastructure.",
        "start_date": "2018-10-01",
        "end_date": "2020-09-30",
        "leader_id": 378,
        "logo": {
            "name": "logo.jpg",
            "path": "project:AR-AQ-Bau",
            "type": "image/jpeg",
            "size": 130796,
            "orig_name": "logo.jpg",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/AR-AQ-Bau/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [
            {
                "id": "FFF",
                "contract_number": "867375",
                "comment": null
            }
        ],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/AR-AQ-Bau",
        "__class": "Project"
    },
    {
        "id": "SCI_BIM",
        "workgroup_id": "rend",
        "drupal_id": 4179,
        "drupal_path": "/research/projects/SCI_BIM",
        "name": "Scanning and data capturing for Integrated Resources and Energy Assessment using Building Information Modelling",
        "name_de": null,
        "short_title": "SCI_BIM",
        "website": null,
        "status": "active",
        "short_abstract": "The aim of the project is to increase the resources- and energy efficiency through coupling of various digital technologies and methods for data capturing (geometry and materials composition) and modelling (as-built BIM), as well as through gamification.\n\nCollaborative project with several companies and institutes.",
        "abstract": "Building stocks and infrastructures are the largest material stock of industrial economies. These total material stocks on the global scale are about as large as reserves of primary resources in nature. It is of long-term importance to maintain or frequently recycle these urban stocks, and in consequence to minimize the use of primary resources and thus the dependency on imports – a strategy labelled as “Urban Mining”. Simultaneously, buildings consume worldwide 40% of energy and produce about 30% of global CO2 emissions. With a construction rate of only 2%, building stocks are crucial for minimization of energy consumption. Due to worldwide rapidly increasing consumption of resources and land, as well as growing generation of waste, increasing of recycling rates and reuse of materials, next to reduction of energy consumption is of highest priority for achievement of sustainability. \nThe aim of this project is increasing of resources and energy efficiency using gamification concept, through coupling of technologies and methods for capturing and modelling (as-built BIM) of buildings and assets (geometry and material composition). Using a real case (TU Wien, Aspanggründe) the Integrated Data Capturing and Modelling Methods will be tested and evaluated in terms of costs and benefits. Thereby for capturing of geometry we will use laser scanning and photogrammetry, and for capturing of material composition Ground Penetrating Radar (GPR). Finally, a Proof of Concept for the suitability of GPR for material capturing and modelling via semi-automatic Scan to BIM process for generation of information-rich as-built BIM from a Point-Cloud will be compiled, which would enable efficient generation of models for Material Passports or BEM - Building Energy Modelling. 
\nWithin this project we will develop the innovative gamification concept, where through user participation (users take photo via smartphone, which is uploaded in the photogrammetric as-built BIM within the gamification platform) the structural changes and user behavior (such as open windows or lighting) can be assessed. Through implementation of user data, the as-built BIM is updated. On the one hand, the structural changes will be captured (static data) and on the other, the user behavior model for operational building automation (dynamic) will be compiled.\nThrough compilation of Proof of Concept for GPR a research gap will be closed – the capturing and modelling of geometry is already well explored, however the methods and tools for capturing and modelling of material composition of buildings are largely lacking. As significant innovative contribution of this project the semi-automated recognition of BIM-Objects from the Point-Cloud, as well as the use of gamification for reduction of energy consumption together with automated update of as built-BIM4FM can be identified. Thereby the automated generation of material passport at the end of the lifecycle will be enabled, thus delivering useful information for the material cadaster as well as for the assessment of the material value of a building.",
        "start_date": "2018-09-01",
        "end_date": "2020-08-31",
        "leader_id": 193,
        "logo": {
            "name": "logo.png",
            "path": "project:SCI_BIM",
            "type": "image/png",
            "size": 20662,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/SCI_BIM/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [
            {
                "id": "FFF",
                "contract_number": "867314",
                "comment": "Stadt der Zukunft"
            }
        ],
        "research_areas": [
            "Geometry",
            "Modeling"
        ],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/SCI_BIM",
        "__class": "Project"
    },
    {
        "id": "d4335",
        "workgroup_id": "vis",
        "drupal_id": 4335,
        "drupal_path": "/research/projects/Smile-Designer-3D",
        "name": "Smile Designer 3D",
        "name_de": "Smile Designer 3D",
        "short_title": "Smile Designer 3D",
        "website": null,
        "status": "active",
        "short_abstract": null,
        "abstract": null,
        "start_date": "2016-05-23",
        "end_date": "2020-06-30",
        "leader_id": 166,
        "logo": null,
        "funding_organisations": [],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/Smile-Designer-3D",
        "__class": "Project"
    },
    {
        "id": "ProKeyAnim",
        "workgroup_id": "vis",
        "drupal_id": 4181,
        "drupal_path": "/research/projects/ProKeyAnim",
        "name": "Procedural Keyframe Animation for 3D Mesoscale Models",
        "name_de": null,
        "short_title": "ProKeyAnim",
        "website": null,
        "status": "active",
        "short_abstract": null,
        "abstract": null,
        "start_date": "2019-07-01",
        "end_date": "2020-06-30",
        "leader_id": 166,
        "logo": null,
        "funding_organisations": [
            {
                "id": "KAUST",
                "contract_number": "OSR-2019-CPF-4108.1",
                "comment": "Projektsumme: US$ 121.191,00"
            }
        ],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/ProKeyAnim",
        "__class": "Project"
    },
    {
        "id": "OpenData",
        "workgroup_id": "rend",
        "drupal_id": 4183,
        "drupal_path": "/research/projects/OpenData",
        "name": "A Test Suite for Photorealistic Rendering and Filtering",
        "name_de": null,
        "short_title": "OpenData",
        "website": null,
        "status": "active",
        "short_abstract": "This project will research methods to test and compare global-illumination algorithms as well as filtering algorithms, and also develop test data sets for this purpose.",
        "abstract": "Rendering photorealistic images has been a long-standing problem in computer graphics. Photorealistic image synthesis is still a vibrant and active research field with many open problems. Most offline methods use path-sampling techniques to evaluate the rendering equation to account for sophisticated light-transport effects. However, this procedure takes up to hours, where the inaccuracy of the initial estimation shows up as noise in the resulting images. In order to alleviate this, sophisticated light-transport algorithms and a number of noise-filtering techniques have been developed. Despite the fact that a large body of research exists in both directions, there are no standardized datasets that enable us to adequately assess their strengths and weaknesses. It would be of utmost importance to be able to compare existing light-transport and noise-filtering algorithms in a scientifically sound way.\nIn this project, we will therefore create such a dataset, and provide the following contributions:\n(1) a set of scene descriptions that can be used to test individual features of these systems, e.g., dealing with a variety of material models, high-resolution geometry, textured inputs, and a variety of lighting effects.\n(2) a large number of rendered images of these scenes with different noisiness, auxiliary buffers to maximize compatibility with the state-of-the-art noise filtering algorithms, and fully converged reference images for easy comparisons against the denoised outputs, and\n(3) a method to ensure parameter coverage, so that the dataset does not become prohibitively large, but still covers salient rendering configurations, that reveal the most interesting cases.\nIn summary, we propose to create a fertile ground for assessing the quality of different photorealistic rendering techniques. We believe that this would lead to significantly higher quality scientific works in the field.",
        "start_date": "2017-05-01",
        "end_date": "2020-04-30",
        "leader_id": 193,
        "logo": {
            "name": "logo.png",
            "path": "project:OpenData",
            "type": "image/png",
            "size": 3775514,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/OpenData/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [
            {
                "id": "fwf",
                "contract_number": "ORD 61",
                "comment": null
            }
        ],
        "research_areas": [
            "Rendering"
        ],
        "publications": [
            "brugger-2020-tdp",
            "brugger-2020-tsdpbr",
            "celarek_adam-2019-qelta",
            "CELAREK-2017-QCL",
            "freude_2020_rs",
            "freude-2023-sem",
            "wiesinger_2020_odpr"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/OpenData",
        "__class": "Project"
    },
    {
        "id": "MAKE-IT-FAB",
        "workgroup_id": "rend",
        "drupal_id": 4185,
        "drupal_path": "/research/projects/MAKE-IT-FAB",
        "name": "MAKE-IT-FAB: Modeling of Shapes for Personal Fabrication",
        "name_de": null,
        "short_title": "MAKE-IT-FAB",
        "website": null,
        "status": "active",
        "short_abstract": "The aim of this project is to investigate and to contribute to shape modeling and geometry processing for personal fabrication---a trend that currently receives intensified attention in the science and industry. Our goal is to contribute novel algorithmic solutions for fabrication-aware shape processing and interactive modeling. ",
        "abstract": "The aim of this project is to investigate and to contribute to shape modeling and geometry processing for personal fabrication---a trend that currently receives intensified attention in the science and industry. \nOur goal is to contribute novel algorithmic solutions for fabrication-aware shape processing and interactive modeling. In particular, we want to research following aspects: (i) We want to address the currently pending problem of high-level shape understanding, (ii) we want to research novel high-level shape modeling methods for interactive creation of plausible and aesthetically appealing shapes, and finally, (iii) we want to address optimization problems related to instant desktop 3D printing, such that modeled objects can be directly fabricated. The outcome of the project will be a set of algorithmic solutions for high-level shape modeling for instant personal fabrication.",
        "start_date": "2015-04-01",
        "end_date": "2020-03-31",
        "leader_id": 844,
        "logo": {
            "name": "logo.jpg",
            "path": "project:MAKE-IT-FAB",
            "type": "image/jpeg",
            "size": 80511,
            "orig_name": "logo.jpg",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/MAKE-IT-FAB/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [
            {
                "id": "fwf",
                "contract_number": "P27972-N31",
                "comment": null
            }
        ],
        "research_areas": [
            "Fabrication"
        ],
        "publications": [
            "aichner-2016-sadf",
            "birsak-2017-dpe",
            "birsak-thesis",
            "Birsak2018-SA",
            "erler-2020-p2s",
            "gersthofer-2016-sosob",
            "hafner-2015-eigf",
            "hafner-2015-onff",
            "leimer_2017_rbpesc",
            "leimer_2020-cag",
            "leimer-2016-coan",
            "leimer-2016-rpe",
            "leimer-2018-sar",
            "musialski_2016_sosp",
            "musialski-2015-ista",
            "musialski-2015-pixel",
            "musialski-2015-souos",
            "musialski-2015-vrvis",
            "steiner_2016_isad",
            "WINKLER-2019-PDG"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/MAKE-IT-FAB",
        "__class": "Project"
    },
    {
        "id": "Animated Cell Tab development",
        "workgroup_id": "vis",
        "drupal_id": 4190,
        "drupal_path": "/research/projects/Animated-Cell-Tab-development",
        "name": "Animated Cell Tab development",
        "name_de": null,
        "short_title": "Animated Cell Tab development",
        "website": null,
        "status": "active",
        "short_abstract": "Animated Cell Tab development",
        "abstract": null,
        "start_date": "2016-11-14",
        "end_date": "2019-12-31",
        "leader_id": 171,
        "logo": null,
        "funding_organisations": [],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/Animated-Cell-Tab-development",
        "__class": "Project"
    },
    {
        "id": "Illustrare",
        "workgroup_id": "vis",
        "drupal_id": 4189,
        "drupal_path": "/research/projects/Illustrare",
        "name": "ILLUSTRARE: Integrative Visual Abstraction of Molecular Data",
        "name_de": null,
        "short_title": "Illustrare",
        "website": null,
        "status": "active",
        "short_abstract": "FWF - I 2953-N31\nIntegrative Visual Abstraction of Molecular Data",
        "abstract": "In many aspects of our modern lives we face the problem of having to make sense of large amounts of data. This applies to scientists trying to make sense of their experiments and simulations, to bankers and traders trying to understand the dynamics of the financial markets, and to large enterprises which need to understand the principles behind demand and supply—to name just a few examples. The basic problem in all these cases is that people need to identify important and/or unexpected features in large simulated or captured datasets. Visualization is the domain that facilitates this process of sense-making by dramatically simplifying the process of obtaining an understanding of the data—by representing data visually and thus amplifying people’s cognition. This inherent capability of (good) visualization techniques to amplify human cognition, however, is no longer enough to be able to make sense of today’s huge datasets. To be able to see the essential aspects we need dedicated mechanisms that abstract away the (unnecessary) detail to, in turn, allow the user of the visualization to focus on the important elements. The crucial problem in this context is that it is impossible to know what is important and what is not in a general way—importance changes based on the research question, on the application domain, on the data size, on the user, on the specific situation, etc. Visualization technology therefore needs to support dynamic change of visual abstraction of the data to reflect these contextual changes. The fundamental research challenge in visualization for us is to get an understanding of what (visual) abstraction really is, what it means, how it can be controlled, and how it is, can be, and should be used in visualization.\n",
        "start_date": "2017-01-01",
        "end_date": "2019-12-31",
        "leader_id": 171,
        "logo": {
            "name": "logo.jpg",
            "path": "project:Illustrare",
            "type": "image/jpeg",
            "size": 95116,
            "orig_name": "logo.jpg",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/Illustrare/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [
            {
                "id": "fwf",
                "contract_number": "I 2953-N31",
                "comment": null
            }
        ],
        "research_areas": [
            "BioVis",
            "IllVis"
        ],
        "publications": [
            "Halladjian_2020",
            "kouril-2018-LoL",
            "lawonn-2018-illvisstar",
            "miao_inria_2017",
            "miao_kaust_2018",
            "miao_nantech_2019",
            "miao_nar_2020",
            "miao_tvcg_2018",
            "miao2018Dimsum",
            "Miao2018FDN",
            "Viola-Pondering-2017"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/Illustrare",
        "__class": "Project"
    },
    {
        "id": "EvaluARte",
        "workgroup_id": "vr",
        "drupal_id": 4192,
        "drupal_path": "/research/projects/EvaluARte",
        "name": "EvaluArte: Systematic Evaluation for AR Controllers",
        "name_de": null,
        "short_title": "EvaluARte",
        "website": null,
        "status": "active",
        "short_abstract": "The goal of this project is the development of a systematic evaluation methodology, the evaluation of AR controllers for industrial tasks by utilizing the developed methodology and the publication of guidelines for developers  of  AR  controllers,  user  interface  designers,  AR  developers  in  general  and  the  AR  research  community. ",
        "abstract": "In this project, we proposed to develop the first systematic evaluation methodology for Augmented Reality (AR) controllers. We proposed to combine new technologies that have not been used for the evaluation of controllers in AR before, to evaluate the performance and functionality of AR controllers, to enhance usability, to develop standard widgets for 3D interfaces and to develop guidelines for the improvement of controllers.\n\nIn course of the project, we analysed user requirements for AR in the industrial domain, developed gaze-free gesture interaction with AR controllers, and created a widget library for AR controllers providing a standardized set of recommended widgets for developers.\n",
        "start_date": "2017-08-01",
        "end_date": "2019-07-31",
        "leader_id": 378,
        "logo": null,
        "funding_organisations": [
            {
                "id": "FFF",
                "contract_number": null,
                "comment": null
            }
        ],
        "research_areas": [
            "VR"
        ],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/EvaluARte",
        "__class": "Project"
    },
    {
        "id": "Smile Analytics",
        "workgroup_id": "vis",
        "drupal_id": 4194,
        "drupal_path": "/research/projects/Smile-Analytics",
        "name": "Smile Analytics: Visual Analytics for Realistic and Aesthetic Smile Design",
        "name_de": null,
        "short_title": "Smile Analytics",
        "website": null,
        "status": "active",
        "short_abstract": "The aim of the project is to improve the digital fabrication of dental prosthetic devices. We employ state of the art visualization techniques to enable a dental pretreatment preview for the patients.",
        "abstract": "The aim of the project is to improve the digital fabrication of dental prosthetic devices. We employ state of the art visualization techniques to enable a dental pretreatment preview for the patients.",
        "start_date": "2017-07-01",
        "end_date": "2019-06-30",
        "leader_id": 869,
        "logo": null,
        "funding_organisations": [
            {
                "id": "FFF",
                "contract_number": "861168",
                "comment": "Basisprogramm Einzelprojekt"
            }
        ],
        "research_areas": [
            "MedVis"
        ],
        "publications": [
            "amirkhanov-2018-withteeth",
            "amirkhanov-2021-diss"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/Smile-Analytics",
        "__class": "Project"
    },
    {
        "id": "BioNetIllustration",
        "workgroup_id": "vis",
        "drupal_id": 4196,
        "drupal_path": "/research/projects/BioNetIllustration",
        "name": "BioNetIllustration: User Centric Illustrations of Biological Networks",
        "name_de": null,
        "short_title": "BioNetIllustration",
        "website": null,
        "status": "active",
        "short_abstract": "<p>In living systems, one molecule is commonly involved in several distinct physiological functions. The roles of molecules are commonly summarized in pathway diagrams, which, however, are abstract, hierarchically nested and thus is difficult to comprehend especially by non-expert audience. The primary goal of this research in visualization is to intuitively support the comprehensive understanding of relationships among biological networks using interactively computed illustrations. Illustrations, especially in textbooks of biology are carefully designed to clearly present reactions between organs as well as interactions within cells. Automatic generation of illustrative visualizations of biological networks is thus the technical content of this proposal. Automatic generation of hand-drawn illustrations has been a challenging task due to the difficulty of algorithmically describing a human creative process such as evaluating and selecting significant information and composing meaningful explanations in a visually plausible manner. The project also involves experts from several disciplines including network and medical visualization, data mining, systems biology as well as perceptual psychology. The result will provide a new direction for physiological process analysis and accelerate the knowledge transfer not only within experts but also to the public. Acknowledgment: The project has received funding from the European Union Horizon 2020 research and innovation programme under the Marie Sklodowska-Curie grant agreement No. 747985.</p>\r\n",
        "abstract": "<p>In living systems, one molecule is commonly involved in several distinct physiological functions. The roles of molecules are commonly summarized in pathway diagrams, which, however, are abstract, hierarchically nested and thus is difficult to comprehend especially by non-expert audience. The primary goal of this research in visualization is to intuitively support the comprehensive understanding of relationships among biological networks using interactively computed illustrations. Illustrations, especially in textbooks of biology are carefully designed to clearly present reactions between organs as well as interactions within cells. Automatic generation of illustrative visualizations of biological networks is thus the technical content of this proposal. Automatic generation of hand-drawn illustrations has been a challenging task due to the difficulty of algorithmically describing a human creative process such as evaluating and selecting significant information and composing meaningful explanations in a visually plausible manner. The project also involves experts from several disciplines including network and medical visualization, data mining, systems biology as well as perceptual psychology. The result will provide a new direction for physiological process analysis and accelerate the knowledge transfer not only within experts but also to the public. Acknowledgment: The project has received funding from the European Union Horizon 2020 research and innovation programme under the Marie Sklodowska-Curie grant agreement No. 747985.</p>\r\n",
        "start_date": "2017-06-21",
        "end_date": "2019-06-20",
        "leader_id": 1464,
        "logo": {
            "name": "logo.png",
            "path": "project:BioNetIllustration",
            "type": "image/png",
            "size": 16337,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/BioNetIllustration/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [
            {
                "id": "Horizon 2020",
                "contract_number": "747985",
                "comment": null
            }
        ],
        "research_areas": [
            "BioVis",
            "InfoVis"
        ],
        "publications": [
            "cmolik-2020-tvcg",
            "kouril-2018-LoL",
            "mizuno-2019-eurovis",
            "Purchase-2020-gd",
            "rinortner_susanne-2019-vpicc",
            "Sbardellati-2019-vcbm",
            "wu-2017-dagstuhl",
            "wu-2018-JVLC",
            "wu-2018-metabo",
            "wu-2018-shonan",
            "wu-2018-story",
            "wu-2019-bmc",
            "wu-2019-report",
            "wu-2019-smw",
            "wu-2019-vcbm",
            "wu-2019-visworkshop",
            "wu-2020-tvcg",
            "YOGHOURDJIAN2019"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/BioNetIllustration",
        "__class": "Project"
    },
    {
        "id": "d4330",
        "workgroup_id": "vr",
        "drupal_id": 4330,
        "drupal_path": "/research/projects/Gebäudesoftskills",
        "name": "Qualification Network for Human Sciences and Structural Engineering",
        "name_de": "Qualifizierungsnetzwerk Humanwissenschaften und Bautechnik",
        "short_title": "Gebäudesoftskills",
        "website": null,
        "status": "active",
        "short_abstract": null,
        "abstract": null,
        "start_date": "2017-01-01",
        "end_date": "2018-12-31",
        "leader_id": 378,
        "logo": null,
        "funding_organisations": [],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/Gebäudesoftskills",
        "__class": "Project"
    },
    {
        "id": "deskollage",
        "workgroup_id": "vis",
        "drupal_id": 4198,
        "drupal_path": "/research/projects/deskollage",
        "name": "Visual Information Foraging on the Desktop",
        "name_de": null,
        "short_title": "deskollage",
        "website": null,
        "status": "active",
        "short_abstract": "The goal of this project is to design and develop novel interactive visualization techniques to support knowledge workers in making sense of their unstructured, dynamic information collections. ",
        "abstract": "Knowledge workers, such as scientists, journalists, or consultants, adaptively seek, gather, and consume information. Initially, knowledge workers may not even know what exactly they are looking for. Only after investigating more and more information, they develop a more concrete mental picture. These “information foraging” and “sensemaking” processes are often inefficient as existing user interfaces provide limited possibilities to combine information from various sources and different formats into a common knowledge representation. The goal of this project was to facilitate such knowledge-intensive tasks through user interfaces that combine manual organization strategies, such as piling of papers and text annotations, with powerful automatic data processing to reveal hidden relations in the collected data.\n \nThis project led to multiple novel user interface concepts that support knowledge-intensive tasks. For instance, <a href=\"https://www.cg.tuwien.ac.at/research/publications/2018/mazurek-2018-veq/\">visual query expansion</a> is an extension to a search engine that visualizes the effects of suggested variations on an online query result to help users to refine their query terms. The <a href=\"https://www.cg.tuwien.ac.at/research/publications/2019/2019-ic/\">information collage</a> lets users capture information fragments from any online sources, which can then be freely recomposed into a common knowledge representation. Natural language processing helps users to identify common topics in their collage. <a href=\"https://users.cg.tuwien.ac.at/~waldner/bicflows/\">BiCFlows</a> helps users to discover hidden relations in very large information collections. For instance, they can explore which authors tend to contribute to similar topics. \n\nThe user interfaces served as foundation for empirical research with users from different knowledge work domains. 
This research provided new insights into the strengths but also the limitations of interactive visualization for information foraging and sensemaking. On the one hand, the studies show that, in some situations, interactive visualization can be less efficient than working with unprocessed text-based information. On the other hand, the studies also show that combining automatic data analysis with interactive visualization can lead to more unexpected findings, especially when exploring large data. The extent of manual information organization varies with the task: the less users know about the gathered information, the more automatic organization they expect. \n\nIn summary, this project contributed novel user interface concepts and design guidelines for user interfaces supporting information foraging and sensemaking. The techniques and study results were published at high-ranking peer-reviewed venues, and it can be expected that the findings of this project shape the way how users seek and organize their information in the future. \n\n",
        "start_date": "2015-12-01",
        "end_date": "2018-12-31",
        "leader_id": 1110,
        "logo": {
            "name": "logo.png",
            "path": "project:deskollage",
            "type": "image/png",
            "size": 331697,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/deskollage/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [
            {
                "id": "fwf",
                "contract_number": "T 752-N30",
                "comment": null
            }
        ],
        "research_areas": [
            "InfoVis"
        ],
        "publications": [
            "2019-ic",
            "byska-2019-mdfc",
            "casellato-2016-pkg",
            "cizmic-2018-evd",
            "donabauer-2015-asc",
            "dworschak-2016-szcm",
            "edlinger-2018-vwr",
            "georgiev-2019-cbg",
            "geymayer-2017-std",
            "gundacker-2020-wlm",
            "gusenbauer-2018",
            "hromniak-2019-vcn",
            "Koszticsak-2017-ewt",
            "mazurek-2017-sio",
            "mazurek-2017-vows",
            "mazurek-2018-vac",
            "mazurek-2018-veq",
            "polatsek-2018-stv",
            "samoul-2019-cnp",
            "sietzen-2019-wnn",
            "sietzen-ifv-2019",
            "smiech-2018-tei",
            "steinboeck-2017-vbn",
            "steinboeck-2017-vefp",
            "steinboeck-2018-lbg",
            "trautner-2018-imd",
            "unger-2019_vcp",
            "Waldin_Nicholas_2016_Chameleon",
            "Waldin_Nicholas_2017_FlickerObserver",
            "waldin-2019-ccm",
            "Waldner_2017_11",
            "waldner-2017-vph",
            "waldner-2018-ved",
            "waldner-2019-rld",
            "waldner-2020-tbg"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/deskollage",
        "__class": "Project"
    },
    {
        "id": "d4338",
        "workgroup_id": "vr",
        "drupal_id": 4338,
        "drupal_path": "/research/projects/Haas-VR",
        "name": "Presentation of virtual machines",
        "name_de": "Erstellung einer virtuellen Maschinenpräsentation",
        "short_title": "Haas VR",
        "website": null,
        "status": "active",
        "short_abstract": null,
        "abstract": null,
        "start_date": "2018-04-01",
        "end_date": "2018-10-31",
        "leader_id": 378,
        "logo": null,
        "funding_organisations": [],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/Haas-VR",
        "__class": "Project"
    },
    {
        "id": "d4340",
        "workgroup_id": "vr",
        "drupal_id": 4340,
        "drupal_path": "/research/projects/ARPathVis",
        "name": "Realistic Indoor Path Visualization with Real-Time Obstacle Avoidance in Augmented Reality",
        "name_de": "Realistische Pfadvisualisierung eines Navigationssystems für Innenräume in Augmented Reality",
        "short_title": "ARPathVis",
        "website": null,
        "status": "active",
        "short_abstract": null,
        "abstract": "<p>Accurate navigation systems for large indoor environments utilizing visual features, which are able to<br> provide real time 3D positions and rotations, are currently under development. However, research on indoor navigation and especially augmented reality (AR) indoor path generation and visualization is sparse.<br> Recent methods that focus mainly on outdoor environments are not suitable for indoor scenarios due to the complexity of floor plans, the limited field-of-view of AR devices, and do not consider fast changes in the viewing direction while being guided. Therefore new methods for path planning, obstacles avoidance and navigation visualization have to be developed.<br> In this project, we propose novel navigation methods to assist mobile indoor navigation by utilizing AR. We present a new dynamic path planning algorithm with real-time obstacle avoidance reacting to changing environments. We also propose three new navigation visualization methods utilizing AR: particles, object-following, and realistic human avatars as guides. In addition, we plan to research real light estimation from shadows using a monocular moving RGB-D camera for realistic lighting, to embed an avatar as a guide. For navigation visualization, different stages of realism will be developed and evaluated on mobile devices. Finally, we will research and integrate haptic feedback to aid navigation.<br> We plan to evaluate the developed algorithms in comprehensive user studies with respect to the efficiency in navigation, sense of presence and user comfort. We expect our approaches to advance theory and practice of personal indoor navigation.</p>",
        "start_date": "2016-03-01",
        "end_date": "2018-08-31",
        "leader_id": 378,
        "logo": null,
        "funding_organisations": [],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/ARPathVis",
        "__class": "Project"
    },
    {
        "id": "d4342",
        "workgroup_id": "rend",
        "drupal_id": 4342,
        "drupal_path": "/research/projects/KMU-Qualifizierung",
        "name": "KMU Qualifizierungsseminar Rekonstruktion und Virtual Reality",
        "name_de": "KMU Qualifizierungsseminar Rekonstruktion und Virtual Reality",
        "short_title": "KMU Qualifizierung",
        "website": null,
        "status": "active",
        "short_abstract": null,
        "abstract": null,
        "start_date": "2017-12-01",
        "end_date": "2018-03-31",
        "leader_id": 193,
        "logo": null,
        "funding_organisations": [],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/KMU-Qualifizierung",
        "__class": "Project"
    },
    {
        "id": "ManyViews",
        "workgroup_id": "vis",
        "drupal_id": 4199,
        "drupal_path": "/research/projects/ManyViews",
        "name": "ManyViews: Integrating Narrations, Observations, and Insights",
        "name_de": null,
        "short_title": "ManyViews",
        "website": null,
        "status": "active",
        "short_abstract": null,
        "abstract": null,
        "start_date": "2016-01-01",
        "end_date": "2017-12-31",
        "leader_id": 166,
        "logo": null,
        "funding_organisations": [],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/ManyViews",
        "__class": "Project"
    },
    {
        "id": "PAMINA",
        "workgroup_id": "rend",
        "drupal_id": 4201,
        "drupal_path": "/research/projects/PAMINA",
        "name": "Data-Driven Procedural Modeling of Interiors",
        "name_de": null,
        "short_title": "PAMINA",
        "website": null,
        "status": "active",
        "short_abstract": "The project develops new procedural modeling methods for interior scenes.",
        "abstract": "We propose new methods to allow efficient and interactive procedural generation of building interiors. The goal will be to use procedural modeling, graph theory and optimization methods for the automated design of highly detailed, furnished rooms.\n\nOur techniques will greatly extend the range of high-quality indoor environments that can be efficiently created by individual users. This will be achieved by (1) a high level of abstraction by description of object classes, (2) the automated creation of designs, and (3) the reusability and variability of created designs.\n\nTarget applications include indoor architecture design, everyday furniture planning, real estates marketing, digital content creation for games and movies, facility management, historical and archaeological visualizations and many more.\n\nFor automated model creation, we will improve the concept of shape grammars. A data-driven design approach will transform semantic description of interior designs to geometric models. It will select and combine grammar rules from a special evolving repository and use the resulting procedures for creation of a large variety of original models. User interaction will be minimized by utilization of abstract descriptions, however direct geometry editing will be still possible.\n\nOur approach will also allow efficient editing of existing designs with automatic preservation of semantic and geometric plausibility. As interiors are often created in many iterations, the costs of refinement steps and error corrections will be strongly reduced.\n\nCurrent procedural techniques for interiors are focused on very specific, partial problems. Our results will become a part of a unified content production pipeline for creation of multi-scale urban environments from the city level all the way to furnished interiors of single houses. 
The research will be divided into three main computer graphics basic research problems:\n\n1.\tFloor planning divides a floor into rooms. Semantics and architectural knowledge determine their connections, sizes and shapes. Our contribution will be a graph-based layout algorithm for arbitrarily shaped rooms with a guarantee of no empty spaces and a minimum of parameters.\n\n2.\tFurniture placement arranges furniture items inside of a room. Mutual relations, functionality, ergonomics and room style determine the layout. Our contribution will be an automated optimization of layouts for illumination, ergonomics and emergency situations, as well as high-level control of layouts by semantic styling.\n\n3.\tFurniture generation creates furniture models, manages the configuration of their movable parts and applies textures. Our contribution will be general polyhedral furniture shapes, model generation inside of constrained spaces, and kinematics of models and posture selection.\n",
        "start_date": "2012-10-01",
        "end_date": "2017-09-30",
        "leader_id": 193,
        "logo": null,
        "funding_organisations": [
            {
                "id": "fwf",
                "contract_number": "P24600-N23",
                "comment": null
            }
        ],
        "research_areas": [
            "Modeling"
        ],
        "publications": [
            "bernhard-2014-EFD",
            "eibensteiner-2016-pmg",
            "fan-2014-scfl",
            "fleiss-2015-da",
            "goetz_johann-cagmfp",
            "gurtler-2014-ssggm",
            "Ilcik_2015_LAY",
            "ilcik-2013-cipmi",
            "ilcik-2013-pmsg",
            "ilcik-2014-cgbpmi",
            "ilcik-2016-cmssg",
            "kreuzer-2016-isf",
            "leimer-2014-fsco",
            "pezenka_lukas2012-bspcga",
            "pogrzebacz-2014-gggm",
            "spitaler-2015-pbi",
            "steiner_2016_isad",
            "sturl_andreas-caflm",
            "VASILJEVS-2018-PMPL"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/PAMINA",
        "__class": "Project"
    },
    {
        "id": "ProcessIllVis",
        "workgroup_id": "vis",
        "drupal_id": 4203,
        "drupal_path": "/research/projects/ProcessIllVis",
        "name": "Illustrative Visualization of Processes",
        "name_de": null,
        "short_title": "ProcessIllVis",
        "website": null,
        "status": "active",
        "short_abstract": null,
        "abstract": null,
        "start_date": "2013-09-01",
        "end_date": "2017-08-31",
        "leader_id": 190,
        "logo": null,
        "funding_organisations": [
            {
                "id": "eu7",
                "contract_number": null,
                "comment": null
            }
        ],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/ProcessIllVis",
        "__class": "Project"
    },
    {
        "id": "INTRA-SPACE",
        "workgroup_id": "rend",
        "drupal_id": 4205,
        "drupal_path": "/research/projects/INTRA-SPACE",
        "name": "INTRA-SPACE: the reformulation of architectural space as a dialogical aesthetic",
        "name_de": null,
        "short_title": "INTRA-SPACE",
        "website": null,
        "status": "active",
        "short_abstract": null,
        "abstract": null,
        "start_date": "2015-04-01",
        "end_date": "2017-03-31",
        "leader_id": 193,
        "logo": null,
        "funding_organisations": [
            {
                "id": "fwf",
                "contract_number": null,
                "comment": null
            }
        ],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/INTRA-SPACE",
        "__class": "Project"
    },
    {
        "id": "VISAR",
        "workgroup_id": "vis",
        "drupal_id": 4211,
        "drupal_path": "/research/projects/VISAR",
        "name": "VISAR - VISual Analytics And Rendering",
        "name_de": null,
        "short_title": "VISAR",
        "website": null,
        "status": "active",
        "short_abstract": null,
        "abstract": null,
        "start_date": "2012-01-01",
        "end_date": "2016-12-31",
        "leader_id": 166,
        "logo": null,
        "funding_organisations": [
            {
                "id": "fwf",
                "contract_number": "P 24597-N23",
                "comment": null
            }
        ],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/VISAR",
        "__class": "Project"
    },
    {
        "id": "Harvest4D",
        "workgroup_id": "rend",
        "drupal_id": 4207,
        "drupal_path": "/research/projects/Harvest4D",
        "name": "Harvest4D - Harvesting Dynamic 3D Worlds from Commodity Sensor Clouds",
        "name_de": null,
        "short_title": "Harvest4D",
        "website": null,
        "status": "active",
        "short_abstract": "Harvest4D investigates the whole acquisition, modeling and rendering pipeline for incidental data capture.",
        "abstract": "<p>The current acquisition pipeline for visual models of 3D worlds is based on a paradigm of planning a goal-oriented acquisition - sampling on site - processing. The digital model of an artifact (an object, a building,up to an entire city) is produced by planning a specific scanning campaign, carefully selecting the (often costly) acquisition devices, performing the on-site acquisition at the required resolution and then post-processing the acquired data to produce a beautified triangulated and textured model. However, in the future we will be faced with the ubiquitous availability of sensing devices that deliver different data streams that need to be processed and displayed in a new way, for example smartphones, commodity stereo cameras, cheap aerial data acquisition devices, etc. We therefore propose a radical paradigm change in acquisition and processing technology: instead of a goal-driven acquisition that determines the devices and sensors, we let the sensors and resulting available data determine the acquisition process. Data acquisition might become incidental to other tasks that evices/people to which sensors are attached carry out. A variety of challenging problems need to be solved to exploit this huge amount of data, including: dealing with continuous streams of time-dependent data, finding means of integrating data from different sensors and modalities, detecting changes in data sets to create 4D models, harvesting data to go beyond simple 3D geometry, and researching new paradigms for interactive inspection capabilities with 4D data sets. In this project, we envision solutions to these challenges, paving the way for affordable and innovative uses of information technology in an evolving world sampled by ubiquitous visual sensors. Our approach is high-risk and an enabling factor for future visual applications. 
The focus is clearly on basic research questions to lay the foundation for the new paradigm of incidental 4D data capture.</p>\r\n",
        "start_date": "2013-06-01",
        "end_date": "2016-07-31",
        "leader_id": 193,
        "logo": {
            "name": "logo.png",
            "path": "project:Harvest4D",
            "type": "image/png",
            "size": 494905,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/Harvest4D/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [
            {
                "id": "eu7",
                "contract_number": "323567",
                "comment": null
            }
        ],
        "research_areas": [
            "Geometry",
            "Modeling",
            "Rendering"
        ],
        "publications": [
            "arikan-2014-pcvis",
            "arikan-2015-dmrt",
            "laager_florian-2014-daa",
            "Mayrhauser-2016-Cnc",
            "MAYRHAUSER-2016-SCA",
            "ohrhallinger-2016-sgp",
            "preiner2014clop",
            "Radwan-2014-CDR",
            "scheiblauer-thesis",
            "SCHUETZ-2014-RRLP",
            "SCHUETZ-2015-HQP",
            "SCHUETZ-2016-POT",
            "WIMMER-2014-DWNT",
            "WIMMER-2016-FUTURIS",
            "WIMMER-2016-HARVEST4D"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/Harvest4D",
        "__class": "Project"
    },
    {
        "id": "Mofa",
        "workgroup_id": "rend",
        "drupal_id": 4209,
        "drupal_path": "/research/projects/Mofa",
        "name": "Modern Functional Analysis in Computer Graphics",
        "name_de": null,
        "short_title": "Mofa",
        "website": null,
        "status": "active",
        "short_abstract": "The projects develops new mathematical foundations for global-illumination algorithms.",
        "abstract": "Creating photorealistic images has always been an essential goal in computer graphics. The image generation process builds on a complex mathematical construct, the so-called rendering equation. This equation defines how light interacts with surfaces in a virtual scene, and involves complex surface description models that describe important effects like reflections, glossy surface interactions, and indirect illumination. Solving this equation can be achieved by investing a large amount of time and computational resources, but intelligent methods have been found that greatly speed up the calculations up to an interactive or even real-time frame rates. Both interactive as well as any non-interactive applications such as computer games, visual effects, architectural lighting simulation, urban and automotive design, disaster simulation and many other applications that depend on an accurate light simulation, profit from efficient ways to calculate light transport.\nThese methods can be categorized by being a part of the mathematical field of functional analysis where a large body of research exists because it forms the basis for scientific fields such as quantum mechanics, chaos and ergodic theory, vision and signal processing besides countless specialized applications in areas like structural mechanics, simulation and other engineering problems. Applications of Fourier or Laplace transformations, Spherical Harmonics or Wavelets, just to name a few important approaches, are ubiquitous.\nHowever, despite the considerable amount of research work devoted to finding methods to calculate and analyze the complex light transport in a virtual scene, they remain challenging issues and many inherent properties of light transport are largely unknown.\nOver the course of the last 10 years, a more general form of wavelets, named anisotropic wavelets that introduce directionality to the basis definitions have been proposed. 
In particular, curvelets and contourlets have already proven to be powerful tools in astrophysics, seismology, fluid dynamics and vision due to their unique properties optimized for natural signals.\nYet, anisotropic wavelets have not been considered for light transport, though they have several advantages over standard wavelets such as a higher sparsity or near-optimal representation. Therefore, the main goal of this project is to develop methods based on anisotropic wavelets that calculate all aspects of light transport more efficiently, delivering a higher image quality with fewer resources, including an adaptation to all principal domains used in computer graphics.  Due to their properties, anisotropic wavelets also form an excellent foundation to perform a fundamental multi-scale and multi-directional analysis of light transport which leads to a better understanding and deeper into the process of light transport in virtual scenes.\n",
        "start_date": "2012-10-01",
        "end_date": "2016-05-31",
        "leader_id": 193,
        "logo": null,
        "funding_organisations": [
            {
                "id": "fwf",
                "contract_number": "P23700-N23",
                "comment": null
            }
        ],
        "research_areas": [
            "Rendering"
        ],
        "publications": [
            "Auzinger_2013_NSAA",
            "Auzinger_2013_SAR",
            "Auzinger_2014_DCGI",
            "Auzinger_2014_UJA",
            "Auzinger-2015-IST",
            "auzinger-dissertation",
            "bernhard-2014-GTOM",
            "Habel_2012_PSP",
            "hecher-2014-MH",
            "intel2016",
            "Jimenez_SSS_2015",
            "LUKSCH-2013-FLM",
            "silvana_2014",
            "sippl-2014-fss",
            "zsolnai-ist-invited-2014"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/Mofa",
        "__class": "Project"
    },
    {
        "id": "avr",
        "workgroup_id": "vis",
        "drupal_id": 4214,
        "drupal_path": "/research/projects/avr",
        "name": "Advanced Volume Rendering",
        "name_de": null,
        "short_title": "avr",
        "website": null,
        "status": "active",
        "short_abstract": "Advanced Volume Rendering",
        "abstract": null,
        "start_date": "2009-09-01",
        "end_date": "2015-12-31",
        "leader_id": 166,
        "logo": null,
        "funding_organisations": [],
        "research_areas": [],
        "publications": [
            "varchola_andrej-2012-fetoscopic"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/avr",
        "__class": "Project"
    },
    {
        "id": "Photo-Guide",
        "workgroup_id": "rend",
        "drupal_id": 4218,
        "drupal_path": "/research/projects/Photo-Guide",
        "name": "Photo-Guide: Image-Based City Exploration",
        "name_de": null,
        "short_title": "Photo-Guide",
        "website": null,
        "status": "active",
        "short_abstract": "The core idea of the project is the enhancement of current state-of-the art navigation systems by visual information obtained from geo-referenced photographs. The aim is to establish a suite of tools together with  algorithmic foundations that will be essential for any large scale image-based guidance project.",
        "abstract": "<p>\nThe core idea of the project is the enhancement of current state-of-the art navigation systems by visual information obtained from geo-referenced photographs. The aim is to establish a suite of tools together with  algorithmic foundations that will be essential for any large scale image-based guidance project.\n\n<p>\nThe main hypothesis of this project is that in the case of guidance, especially for pedestrians, the first-person perspective is intuitive and leads to natural orientation. In the project we will  research how to generate first-person views on top of databases of simple ground-based photographs and classical overview maps. The ultimate idea is a system which allows the user to navigate through a city and even through interiors of buildings guided by the aid of annotated first-person views. Furthermore, the actual creation of the routes shall happen automatically on demand.\n\n<p>\nExamples of fields that can benefit from the proposed approach are tourism and general entertainment industries, city planners, local governments, simulation and security training, emergency management, civil protection and disaster control, as well as driving simulation to name but a few. Ultimately, the following scientific fields can profit from the proposed basic research results: computer graphics with image processing, computer vision, computational photography, pattern recognition, photogrammetry and remote sensing, cartography, computer aided design, geo-sciences and mobile-technology.",
        "start_date": "2011-01-01",
        "end_date": "2015-12-31",
        "leader_id": 193,
        "logo": {
            "name": "logo.png",
            "path": "project:Photo-Guide",
            "type": "image/png",
            "size": 477315,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/Photo-Guide/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [
            {
                "id": "fwf",
                "contract_number": "P23237-N23",
                "comment": null
            }
        ],
        "research_areas": [
            "Modeling"
        ],
        "publications": [
            "Adorjan-2015",
            "albillos-2013-MA",
            "birsak-2013-sta",
            "birsak-2014-agtb",
            "birsak-2017-dpe",
            "birsak-thesis",
            "charpenay-2013-PR",
            "charpenay-2014-sgn",
            "fan-2014-scfl",
            "kendlbacher-2016",
            "koessler-2013-BA",
            "Kollmann-2015-DoF",
            "liu-2012-tcvd",
            "manpreet_kainth-2012-rus",
            "musialski_2012_aachen",
            "musialski_2012_fice",
            "musialski-2012-icb",
            "musialski-2012-sur",
            "Musialski-2013-ipmum",
            "musialski-2013-prag",
            "musialski-2013-surcgf",
            "schaukowitsch-2013-fls",
            "sperl-2013-BA",
            "steiner-2014-da",
            "trenkwalder-2013-ma",
            "zapotocky_2013_ma"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/Photo-Guide",
        "__class": "Project"
    },
    {
        "id": "REPLICATE",
        "workgroup_id": "rend",
        "drupal_id": 4216,
        "drupal_path": "/research/projects/REPLICATE",
        "name": "Semantic Modeling and Acquisition for Urban Safety Simulations",
        "name_de": null,
        "short_title": "REPLICATE",
        "website": null,
        "status": "active",
        "short_abstract": "<p>This project aims at analysing the traffic flow of open roads in real time by a sensor fusion of radar data with video feeds. Radar gives an accurate position and velocity of vehicles making computer vision methods more robust in computing their spatial extent and classification from video streams. In this way the huge amount of raw data is reduced to semantically relevant information, which is highly memory efficient, anonymous and sufficient to reconstruct traffic flow over long time periods. Another important goal is a sophisticated 3D visualization of the reconstructed traffic flow providing interactive tools for visual analysis. Information obtained in this way will significantly contribute in adopting measures to increase traffic safety.</p>\r\n",
        "abstract": "<p>This project aims at analysing the traffic flow of open roads in real time by a sensor fusion of radar data with video feeds. Radar gives an accurate position and velocity of vehicles making computer vision methods more robust in computing their spatial extent and classification from video streams. In this way the huge amount of raw data is reduced to semantically relevant information, which is highly memory efficient, anonymous and sufficient to reconstruct traffic flow over long time periods. Another important goal is a sophisticated 3D visualization of the reconstructed traffic flow providing interactive tools for visual analysis. Information obtained in this way will significantly contribute in adopting measures to increase traffic safety.</p>\r\n",
        "start_date": "2013-01-01",
        "end_date": "2015-12-31",
        "leader_id": 193,
        "logo": null,
        "funding_organisations": [
            {
                "id": "FFF",
                "contract_number": "835948",
                "comment": "FIT-IT"
            }
        ],
        "research_areas": [
            "Modeling",
            "Rendering"
        ],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/REPLICATE",
        "__class": "Project"
    },
    {
        "id": "Scenario Pool",
        "workgroup_id": "vis",
        "drupal_id": 4213,
        "drupal_path": "/research/projects/Scenario-Pool",
        "name": "Scenario Pool: Visual Analytics for Action Planning in the Presence of Uncertainty",
        "name_de": null,
        "short_title": "Scenario Pool",
        "website": null,
        "status": "active",
        "short_abstract": "<p>Natural hazards such as floods are likely to occur more often in the near future. Modern information technology can simplify the workflow of disaster management. However, none of the existing tools is tailored to the requirements of action planning. The major challenge lies in the sheer quantity of complex information the planner has to account for. The choice of the right action at the right time depends on a variety of factors and cannot be automated. Moreover, since the course of events cannot be exactly predicted, numerous, alternative scenarios have to be considered. This project is dedicated to research in the field of Visual Analytics for the development of a novel decision-support system with two major goals in mind: First, users will be able to create a large scenario pool without the need for engineering skills. Second, first responders will be empowered to exploit this scenario pool for decision making in time-critical situations.</p>\r\n",
        "abstract": "<p>Natural hazards such as floods are likely to occur more often in the near future. Modern information technology can simplify the workflow of disaster management. However, none of the existing tools is tailored to the requirements of action planning. The major challenge lies in the sheer quantity of complex information the planner has to account for. The choice of the right action at the right time depends on a variety of factors and cannot be automated. Moreover, since the course of events cannot be exactly predicted, numerous, alternative scenarios have to be considered. This project is dedicated to research in the field of Visual Analytics for the development of a novel decision-support system with two major goals in mind: First, users will be able to create a large scenario pool without the need for engineering skills. Second, first responders will be empowered to exploit this scenario pool for decision making in time-critical situations.</p>\r\n",
        "start_date": "2012-07-01",
        "end_date": "2015-12-31",
        "leader_id": 798,
        "logo": null,
        "funding_organisations": [
            {
                "id": "WWTF",
                "contract_number": "ICT12-009",
                "comment": null
            }
        ],
        "research_areas": [
            "InfoVis"
        ],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/Scenario-Pool",
        "__class": "Project"
    },
    {
        "id": "KASI",
        "workgroup_id": "vis",
        "drupal_id": 4220,
        "drupal_path": "/research/projects/KASI",
        "name": "KASI: Knowledge Assisted Sparse Interaction for Peripheral CT",
        "name_de": null,
        "short_title": "KASI",
        "website": null,
        "status": "active",
        "short_abstract": "The knowledge assisted sparse interaction proposed above is a general paradigm, applicable to any area of visualization. However, within the framework of the current proposal we want to specialize predominantly on one of these possibilities, namely on the processing and diagnostics of the peripheral artery occlusive disease (PAOD) within the context of noninvasive CT angiography [1]. PAOD is a significant health problem in the industrialized world, with a prevalence of 8-12 million cases in the US alone [32]. The number of such cases is expected to increase as the population ages.",
        "abstract": "The knowledge assisted sparse interaction proposed above is a general paradigm, applicable to any area of visualization. However, within the framework of the current proposal we want to specialize predominantly on one of these possibilities, namely on the processing and diagnostics of the peripheral artery occlusive disease (PAOD) within the context of noninvasive CT angiography [1]. PAOD is a significant health problem in the industrialized world, with a prevalence of 8-12 million cases in the US alone [32]. The number of such cases is expected to increase as the population ages.",
        "start_date": "2010-07-01",
        "end_date": "2015-09-30",
        "leader_id": 166,
        "logo": null,
        "funding_organisations": [
            {
                "id": "fwf",
                "contract_number": "TRP 67-N23",
                "comment": null
            }
        ],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/KASI",
        "__class": "Project"
    },
    {
        "id": "ViMaL",
        "workgroup_id": "vis",
        "drupal_id": 4222,
        "drupal_path": "/research/projects/ViMaL",
        "name": "ViMaL: A Visualization Mapping Language",
        "name_de": null,
        "short_title": "ViMaL",
        "website": null,
        "status": "active",
        "short_abstract": "Visualization is the discipline dealing with the depiction of data. The assignment of visual abstractions to data abstractions is referred to as the visualization mapping. \nExpressive visualization mappings proved in many cases to be more effective than the examination of raw data (i.e., large tables of numbers). Although visualization mapping is a crucial step in the visualization pipeline surprisingly few general approaches exist.\nWe propose to develop the Visualization Mapping Language (ViMaL) that is capable of describing expressive visualization mappings and provides user interfaces suitable for non-experts in visualization. \nViMaL will be a language that is used to specify visualization pipelines. We will provide language concepts for the specification of data abstraction processes, visual concretization processes and the required visualization mapping. With this approach the semantics of a specific domain are modeled by the domain experts while the semantics of the visualization domain are modeled by the visualization expert. The visualization mapping is described using domain semantics as well as visualization semantics. Unlike other general purpose visualization systems, ViMaL systems explicitly use the semantics of the visualization mapping process. It incorporates information and knowledge assisted methods in the visualization mapping pipeline.\nIn the scope of this project we will define a novel Visualization Mapping Language and implement a toolbox that aids researchers from other areas to integrate the ViMaL concepts into their systems. The toolbox will provide basic ViMaL document setup and parsing as well as components for each step in the visualization pipeline. We will make use of fuzzy logic in our components for the abstraction of data, for the visual concretization, and for the visualization mapping. \nWe believe that the formalization of the visualization pipeline that is achieved with the visualization mapping language will enable many researchers from other domains to benefit from existing visualization methods. Further, the introduction of domain and visualization semantics for the specification of visualization mappings enables meta-visualization approaches that provide insight into the visualization process itself. Laypersons will benefit from such meta-visualization systems that are able to illustrate the involved components.",
        "abstract": "Visualization is the discipline dealing with the depiction of data. The assignment of visual abstractions to data abstractions is referred to as the visualization mapping. \nExpressive visualization mappings proved in many cases to be more effective than the examination of raw data (i.e., large tables of numbers). Although visualization mapping is a crucial step in the visualization pipeline surprisingly few general approaches exist.\nWe propose to develop the Visualization Mapping Language (ViMaL) that is capable of describing expressive visualization mappings and provides user interfaces suitable for non-experts in visualization. \nViMaL will be a language that is used to specify visualization pipelines. We will provide language concepts for the specification of data abstraction processes, visual concretization processes and the required visualization mapping. With this approach the semantics of a specific domain are modeled by the domain experts while the semantics of the visualization domain are modeled by the visualization expert. The visualization mapping is described using domain semantics as well as visualization semantics. Unlike other general purpose visualization systems, ViMaL systems explicitly use the semantics of the visualization mapping process. It incorporates information and knowledge assisted methods in the visualization mapping pipeline.\nIn the scope of this project we will define a novel Visualization Mapping Language and implement a toolbox that aids researchers from other areas to integrate the ViMaL concepts into their systems. The toolbox will provide basic ViMaL document setup and parsing as well as components for each step in the visualization pipeline. We will make use of fuzzy logic in our components for the abstraction of data, for the visual concretization, and for the visualization mapping. \nWe believe that the formalization of the visualization pipeline that is achieved with the visualization mapping language will enable many researchers from other domains to benefit from existing visualization methods. Further, the introduction of domain and visualization semantics for the specification of visualization mappings enables meta-visualization approaches that provide insight into the visualization process itself. Laypersons will benefit from such meta-visualization systems that are able to illustrate the involved components.",
        "start_date": "2010-01-01",
        "end_date": "2014-12-31",
        "leader_id": 166,
        "logo": {
            "name": "logo.png",
            "path": "project:ViMaL",
            "type": "image/png",
            "size": 51461,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/ViMaL/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [
            {
                "id": "fwf",
                "contract_number": "P 21695-N23",
                "comment": null
            }
        ],
        "research_areas": [],
        "publications": [
            "beham-2014-cupid",
            "Birkeland-2012-IMC",
            "borgo-2013-gly",
            "bruckner-2010-HVC",
            "Csebfalvi-2012-IOM",
            "Groeller_2011_NR",
            "Herghelegiu-2012-BPV",
            "kehrer-2013-IVA",
            "kehrer-2013-SBC",
            "Kehrer-2014-CSD",
            "Labschuetz_Matthias_2016_JIT",
            "Labschuetz_Matthias_2016_JITT",
            "mindek_peter-2014-cs_kaust",
            "mindek-2013-cs_cvut",
            "mindek-2013-csl",
            "mindek-2013-pel",
            "mindek-2014-mcs",
            "mindek-2014-vivi_cvut",
            "mindek-2015-mctalk",
            "mindek-thesis",
            "oeltze-2013-tut",
            "Peter_2012_AIV",
            "rautek2010",
            "Ropinski-2012-UBT",
            "schmidt-phd",
            "sikachev_peter_2010_STLCI",
            "sikachev_peter-2011-dfc",
            "sikachev_peter-2011-incshad",
            "sikachev_peter-2011-metavis",
            "sikachev_peter-2011-protovis",
            "sikachev-2010-DFC",
            "sikachev-2010-ill_vis_vol_ren",
            "Soros_AVN_2011",
            "vaico",
            "ymca"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/ViMaL",
        "__class": "Project"
    },
    {
        "id": "Smart-CT",
        "workgroup_id": "vis",
        "drupal_id": 4224,
        "drupal_path": "/research/projects/Smart-CT",
        "name": "Smart-CT: Genaue Geometriebestimmung und Interfacecharakterisierung von Multi-Materialbauteilen mittels Kegelstrahl-CT",
        "name_de": null,
        "short_title": "Smart-CT",
        "website": null,
        "status": "active",
        "short_abstract": "Die Vermessung der 3D-Geometrie und von inneren Grenzflächen (z.B. Materialübergänge, Porennester, Inhomogenitäten,...) von industriellen Objekten wie z.B. Multimaterial und Verbund-werkstoffkomponenten ist integraler Bestandteil moderner Bauteilentwicklung und Qualitätssicherung. Üblicherweise wird die Geometrie taktil mittels Koordinatenmessgeräten an vordefinierten Punkten gemessen. In speziellen Fällen wird die taktile Messtechnik mittels optischer Scanner unterstützt. Allerdings können beide Messverfahren nicht innere Geometrien oder Materialübergänge erfassen. Um diese Nachteile zu eliminieren, wird in letzter Zeit immer häufiger Röntgen-Computertomografie (CT) verwendet. Hierbei wird meist 3DCT eingesetzt (=CT mit einer Kegelstrahl-Röntgenquelle und einem Matrixdetektor), die wegen der signifikant kürzeren Messzeiten und der damit verbundenen Kostenersparnis für die meisten Anwendungen attraktiver ist als konventionelle 2DCT (=CT mit einer Fächerstrahl-Quelle und einem Zeilendetektor)[CT08, PTB07, DIR07, DIR03, SS00]. CT ist eine zerstörungsfreie Methode, mit der Bauteile dreidimensional vermessen werden und versteckte Fehler (z.B. Lunker, Risse, Verunreinigungen, Poren, Materialfehler, Materialübergänge,...) in der Tiefe eines Werkstoffes detektiert werden können. Das Verfahren der CT ist seit vielen Jahren in der medizinischen Diagnostik etabliert und hat in jüngster Zeit sehr stark an Bedeutung in der Industrie gewonnen. Der Fokus dieses vorgeschlagenen Projekts liegt im Bereich der 3DCT für industrielle Anwendungen. Für alle weiteren Ausführungen wird CT in diesem Kontext betrachtet.",
        "abstract": "Die Vermessung der 3D-Geometrie und von inneren Grenzflächen (z.B. Materialübergänge, Porennester, Inhomogenitäten,...) von industriellen Objekten wie z.B. Multimaterial und Verbund-werkstoffkomponenten ist integraler Bestandteil moderner Bauteilentwicklung und Qualitätssicherung. Üblicherweise wird die Geometrie taktil mittels Koordinatenmessgeräten an vordefinierten Punkten gemessen. In speziellen Fällen wird die taktile Messtechnik mittels optischer Scanner unterstützt. Allerdings können beide Messverfahren nicht innere Geometrien oder Materialübergänge erfassen. Um diese Nachteile zu eliminieren, wird in letzter Zeit immer häufiger Röntgen-Computertomografie (CT) verwendet. Hierbei wird meist 3DCT eingesetzt (=CT mit einer Kegelstrahl-Röntgenquelle und einem Matrixdetektor), die wegen der signifikant kürzeren Messzeiten und der damit verbundenen Kostenersparnis für die meisten Anwendungen attraktiver ist als konventionelle 2DCT (=CT mit einer Fächerstrahl-Quelle und einem Zeilendetektor)[CT08, PTB07, DIR07, DIR03, SS00]. CT ist eine zerstörungsfreie Methode, mit der Bauteile dreidimensional vermessen werden und versteckte Fehler (z.B. Lunker, Risse, Verunreinigungen, Poren, Materialfehler, Materialübergänge,...) in der Tiefe eines Werkstoffes detektiert werden können. Das Verfahren der CT ist seit vielen Jahren in der medizinischen Diagnostik etabliert und hat in jüngster Zeit sehr stark an Bedeutung in der Industrie gewonnen. Der Fokus dieses vorgeschlagenen Projekts liegt im Bereich der 3DCT für industrielle Anwendungen. Für alle weiteren Ausführungen wird CT in diesem Kontext betrachtet.",
        "start_date": "2009-01-01",
        "end_date": "2013-09-27",
        "leader_id": 166,
        "logo": null,
        "funding_organisations": [
            {
                "id": "FFF",
                "contract_number": "818108 (Bridge project)",
                "comment": null
            }
        ],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/Smart-CT",
        "__class": "Project"
    },
    {
        "id": "AutARG",
        "workgroup_id": "vis",
        "drupal_id": 4226,
        "drupal_path": "/research/projects/AutARG",
        "name": "AutARG: Automatic Algorithms for Result Generation in Visualization",
        "name_de": null,
        "short_title": "AutARG",
        "website": null,
        "status": "active",
        "short_abstract": "The main goal of the proposed Dissertation Fellowship project AutARG (Automatic Algorithms for Result Generation in Visualization) is to speed up the analysis process of 3D/4D simulation data from different domains like engineering, meteorology, climatology, or medical applications. Since the number and size of simulations in these fields is increasing rapidly less and less time can be spent in the actual analysis process of the generated data. We therefore want to introduce novel approaches to semi-automatically generate visualization and analysis results based on user-defined templates, rules and semantic information gathered during previous interactive analysis sessions. This will greatly improve the efficiency of the post-processing phase necessary to document and present information gained from the simulation results. In the course of this project we plan to research efficient and user friendly methods for the analysis and presentation of simulation results, which will be implemented in the SimVis framework. SimVis is a state of the art interactive visual analysis tool for large and complex simulation data, which up to now, like many other state of the art systems, is relying mostly on time-consuming user interaction. The planned project will be structured into three larger phases of research and development. In the first phase automatic generation of visualization results based on manually specified features will be introduced. The second phase will deal with semi-automatic feature specification based on templates additionally to the automatic result generation. The third phase should extend the proposed methods to incorporate story telling functionality which can be used to present results to non-experts. \nThe results of this project will not only include the PhD thesis of Philipp Muigg and a larger number of high-quality publications, but also newly developed software modules, which shall be capable of demonstrating the full potential of the newly developed methods also for real world datasets from various application fields.",
        "abstract": "The main goal of the proposed Dissertation Fellowship project AutARG (Automatic Algorithms for Result Generation in Visualization) is to speed up the analysis process of 3D/4D simulation data from different domains like engineering, meteorology, climatology, or medical applications. Since the number and size of simulations in these fields is increasing rapidly less and less time can be spent in the actual analysis process of the generated data. We therefore want to introduce novel approaches to semi-automatically generate visualization and analysis results based on user-defined templates, rules and semantic information gathered during previous interactive analysis sessions. This will greatly improve the efficiency of the post-processing phase necessary to document and present information gained from the simulation results. In the course of this project we plan to research efficient and user friendly methods for the analysis and presentation of simulation results, which will be implemented in the SimVis framework. SimVis is a state of the art interactive visual analysis tool for large and complex simulation data, which up to now, like many other state of the art systems, is relying mostly on time-consuming user interaction. The planned project will be structured into three larger phases of research and development. In the first phase automatic generation of visualization results based on manually specified features will be introduced. The second phase will deal with semi-automatic feature specification based on templates additionally to the automatic result generation. The third phase should extend the proposed methods to incorporate story telling functionality which can be used to present results to non-experts. \nThe results of this project will not only include the PhD thesis of Philipp Muigg and a larger number of high-quality publications, but also newly developed software modules, which shall be capable of demonstrating the full potential of the newly developed methods also for real world datasets from various application fields.",
        "start_date": "2009-04-01",
        "end_date": "2013-06-03",
        "leader_id": 166,
        "logo": null,
        "funding_organisations": [
            {
                "id": "FFF",
                "contract_number": "FIT-IT project no. 819352",
                "comment": null
            }
        ],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/AutARG",
        "__class": "Project"
    },
    {
        "id": "Semantic Steering",
        "workgroup_id": "vis",
        "drupal_id": 4228,
        "drupal_path": "/research/projects/Semantic-Steering",
        "name": "Semantic Steering",
        "name_de": null,
        "short_title": "Semantic Steering",
        "website": null,
        "status": "active",
        "short_abstract": "Mission: \"To support time-critical decision making using visual simulation control\"",
        "abstract": "Fluid simulation tools are capable of predicting natural processes and may be employed for the assistance in the human decision making process. Existing solutions lack a number of important features needed for a feasible support system. Very important is the usability of the simulation tools by non-experts in real-time without special fluid simulation expertise. More research is thus needed which fully exploits modern visualization,  steering and simulation technologies. Especially, with the capabilities of  graphics hardware clusters, everybody will have access to affordable supercomputing power on their desktop. Until now, little work has been done to use this power for interactive simulation for knowledge generation. In this proposal we suggest a novel, integrated system that operates in real-time and provides effective feedback via an intuitive web-based interface. The proposed technique enables users to interact with a remote simulation system based on their understanding and to examine alternative scenarios in a short period of time.  Among the application areas of the suggested system we consider the industrial design of components where rapid prototyping is required. Using a less accurate but fast and intuitive system will help to evaluate whether a certain concept is promising during the design phase. Another important application will be the assistance in emergency situations that are caused by natural disasters such as floods, where safety and damage limitation depend on fast decisions. Our vision is that, even under time-critical circumstances, emergency personnel on-site will be able to analyze the imminent situation quickly to finally choose the best response strategy. Consider a flooding situation, where helpers are trying to prevent damage by arranging flow barriers in different locations. Using a mobile device like a hand-held tablet pc, the helpers are capable to quickly setup and perform a simulation. \nInteractive 3d renderings and 2d sketches of the local area illustrate the simulation outcome and show the flooding risks. Assuming that later, the weather conditions change unexpectedly,  quick responses are necessary.  By means of intuitive drawing with a control pen, users rearrange the barriers and start an additional simulation. In such an interactive cycle, helpers are able to quickly optimize the arrangement of the barriers.\nTo realize our vision we plan the integration and extension of methods from multivariate visualization, computational steering and physical simulation. This combination evolves into a new paradigm we call semantic steering:  Users will be able to steer simulations based on their knowledge and understanding. We suggest to move from a direct manipulation of numbers towards interaction with advanced visual controls. No more complicated rethinking, users simply sketch their ideas. Visual analytics will be crucial for the success of the project. Visualization is a natural way to get insight into the complex simulation outcome and to understand the relevance of decisions made.  Novel techniques are required as users will be confronted with  a whole range of simulation runs. To develop and show the feasibility of our concepts we will incorporate existing simulation solutions that are implemented on graphics hardware and provide a good tradeoff between accuracy and speed.  New data structures and algorithms for scalable visualization, interaction, analytics and data-handling are necessary and we have compiled a team of senior and junior scientists with experience in both scientific research and industrial applications to solve the involved questions.",
        "start_date": "2010-06-01",
        "end_date": "2013-05-31",
        "leader_id": 166,
        "logo": null,
        "funding_organisations": [
            {
                "id": "fwf",
                "contract_number": "P 22542-N23",
                "comment": null
            }
        ],
        "research_areas": [],
        "publications": [
            "Ribicic-2010-CRS",
            "Waser-2010-WL"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/Semantic-Steering",
        "__class": "Project"
    },
    {
        "id": "RESHADE",
        "workgroup_id": "rend",
        "drupal_id": 4230,
        "drupal_path": "/research/projects/RESHADE",
        "name": "Reciprocal Shading for Mixed Reality",
        "name_de": null,
        "short_title": "RESHADE",
        "website": null,
        "status": "active",
        "short_abstract": "The aim of this project is to simulate the mutual influence between real and virtual objects in mixed reality applications.",
        "abstract": "<p>The aim of the RESHADE project is to simulate the mutual influence between real and virtual objects in mixed reality applications. Virtual objects in such applications appear disturbingly artificial because rendering completely ignores the real environment. But from the term mixed reality one would expect that virtual and real object harmoniously blend into one visual perception and cannot be distinguished easily.</p>\r\n\r\n<p>The ambitious goal of this project is to provide users with a perfect illusion, so that they cannot perceive a difference between virtual and real objects. That means virtual objects are responsive to changes in the real environment in real time. They have to be rendered in a believable realistic way so that they smoothly blend with reality. It is also important to simulate the visual effects of virtual objects on real ones.</p>\r\n\r\n<p>Besides for realistic rendering this technique can also be used for innovative ways of interaction and dynamic behaviour of virtual objects, which are key aspects of mixed reality.</p>\r\n\r\n<p>Research focuses on real time rendering algorithms for mixed reality applications that consider the lighting and geometry of the real environment. Special cameras will continuously capture reality. The real geometry will also be used to simulate effects of virtual objects on real ones in real time. Shading of real objects will be corrected by digital composition.</p>\r\n\r\n<p>Research results will be repeatedly evaluated in the course of the project within three different application scenarios. Thereby human perception is the sole key factor for assessment of the quality of the solution.</p>\r\n",
        "start_date": "2009-02-01",
        "end_date": "2013-01-31",
        "leader_id": 237,
        "logo": {
            "name": "logo.png",
            "path": "project:RESHADE",
            "type": "image/png",
            "size": 187528,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/RESHADE/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [
            {
                "id": "FFF",
                "contract_number": "820916",
                "comment": "FIT-IT Visual Computing"
            }
        ],
        "research_areas": [
            "Rendering"
        ],
        "publications": [
            "celarek_adam-2012-rrmro",
            "jahrmann_klemens_KFR",
            "knecht_2013_RSM",
            "knecht_martin_2010_DIR",
            "knecht_martin_2012_BRDFEstimation",
            "knecht_martin_2012_RSMR",
            "knecht_martin_2013_ReflRefrObjsMR",
            "knecht_martin-2011-FPSPAR",
            "knecht-2011-CBCM",
            "KUE11",
            "laager_florian-2013-camr",
            "rasch_martina-2013-HDRImage",
            "spelitz_stefan-2012-CDTFMR",
            "stutter_david-2012-kft",
            "winklhofer_christoph-2013-RRMR"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/RESHADE",
        "__class": "Project"
    },
    {
        "id": "Desiree",
        "workgroup_id": "rend",
        "drupal_id": 4232,
        "drupal_path": "/research/projects/Desiree",
        "name": "Detailed Surfaces for Interactive Rendering",
        "name_de": null,
        "short_title": "Desiree",
        "website": null,
        "status": "active",
        "short_abstract": "In this project we study new representation and rendering methods for surface detail in real time.",
        "abstract": "Humans perceive their surroundings by the light that is reflected from surfaces. In particular, it is the fine detail observed on surfaces that provides clues about material properties like roughness, texture, temperature, etc. Similarly, in interactive computer graphics applications, the complexity of surface renderings is one of the first things that occur to an observer when judging how good or realistic the application looks. One key issue is to provide the same amount of visual detail as a real surface. This plays an important role in a number of interactive applications where the viewpoint and illumination change rapidly, including visual impact analysis, cultural heritage, design reviews, architecture and urban planning, driving and traffic simulation, engineering and computer games to name but a few. The aim of the Desiree project is to develop algorithms and data structures to efficiently acquire, store and display geometrically complex surfaces for such real-time applications.\nAdding surface detail has been a research topic since the early days of computer graphics, including texture mapping, displacement mapping and slice-based representations. However, none of these approaches can directly be used for the aforementioned applications due to insufficient image quality, non-interactivity or too high memory requirements. In addition, no tools are available to convert the complex models created by 3D artists into representations useful for fast and high-quality display.\nIn Desiree, the main strategy will be to treat the rough object shape and the fine-scale details separately. We will research efficient data structures and algorithms that consider all aspects of this strategy, starting from the decomposition of a complex mesh into low- and high-detail components, efficient representations for the high-detail components, different mappings from the high-detail to the low-detail representation, and high-quality rendering in real time, including anti-aliasing issues and realistic illumination. For rendering, we will exploit recent programmable graphics hardware to develop output-sensitive display algorithms based on ray casting. We believe that this concept will allow us to achieve both, high image quality and real-time frame rates at the same time.\n",
        "start_date": "2008-07-01",
        "end_date": "2012-12-31",
        "leader_id": 193,
        "logo": {
            "name": "logo.png",
            "path": "project:Desiree",
            "type": "image/png",
            "size": 1623033,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/Desiree/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [
            {
                "id": "fwf",
                "contract_number": "P20768-N13",
                "comment": null
            }
        ],
        "research_areas": [
            "Rendering"
        ],
        "publications": [
            "Auzinger_2012_AAA",
            "Auzinger_2013_AnaVis",
            "auzinger-thesis",
            "bhagvat-09-frusta",
            "jeschke-09-rendering",
            "jeschke-09-solver",
            "jeschke-2010-diff",
            "jeschke-2011-est",
            "jeschke-2011-esttalk",
            "jeschke-2011-talkPrague",
            "MeierStauffer-2013-iihf",
            "Nuernberg-2011-CBO",
            "preiner_2012_AS",
            "prieler_11_patchmatch"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/Desiree",
        "__class": "Project"
    },
    {
        "id": "Scale-VS",
        "workgroup_id": "vis",
        "drupal_id": 4234,
        "drupal_path": "/research/projects/Scale-VS",
        "name": "Scale-VS: Research on the Scalability and Confluence of Scientific Visualization and Interactive Segmentation",
        "name_de": null,
        "short_title": "Scale-VS",
        "website": null,
        "status": "active",
        "short_abstract": "<p>Research on the Scalability and Confluence of Scientific Visualization and Interactive Segmentation (together with Markus Hadwiger, VRVis (main applicant))</p>\r\n",
        "abstract": "<p>Research on the Scalability and Confluence of Scientific Visualization and Interactive Segmentation (together with Markus Hadwiger, VRVis (main applicant))</p>\r\n",
        "start_date": "2009-01-01",
        "end_date": "2012-12-31",
        "leader_id": 226,
        "logo": null,
        "funding_organisations": [
            {
                "id": "WWTF",
                "contract_number": "ICT08-40, 2009-2012",
                "comment": null
            }
        ],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/Scale-VS",
        "__class": "Project"
    },
    {
        "id": "TERAPOINTS",
        "workgroup_id": "rend",
        "drupal_id": 4236,
        "drupal_path": "/research/projects/TERAPOINTS",
        "name": "TERAPOINTS - High Quality Visualization and Interaction with Gigantic Point Data Sets",
        "name_de": null,
        "short_title": "TERAPOINTS",
        "website": null,
        "status": "active",
        "short_abstract": "Laser scanning is an important tool in cultural heritage for documenting the state of archaeological monuments. The data produced by laser scanning is ever increasing, with the latest generation of laser scanners generating a billion points in one measurement pass. To colorize the laser scans, additional photographs are taken. Managing these huge amounts of data in terms of size is a challenge on its own, but also to ensure the quality of the resulting point-based models is of utmost importance for the further development of laser scanning as a standard technique in cultural heritage. We identified three topics that can drive forward the integration of laser scanning in the everyday work of archaeologists.",
        "abstract": "<b>Project partners:</b>\n<ul>\n<li><a href=\"http://www.oeaw.ac.at/antike/index.php?id=50\">Austrian Academy of Sciences (ÖAW):</a> Norbert Zimmermann</li>\n<li><a href=\"http://www.imagination.at\">Imagination Computer Services GmbH</a></li>\n</ul>\n\n<p>\nLaser scanning is an important tool in cultural heritage for documenting the state of archaeological monuments. The data produced by laser scanning is ever increasing, with the latest generation of laser scanners generating a billion points in one measurement pass. To colorize the laser scans, additional photographs are taken. Managing these huge amounts of data in terms of size is a challenge on its own, but also to ensure the quality of the resulting point-based models is of utmost importance for the further development of laser scanning as a standard technique in cultural heritage. We identified three topics that can drive forward the integration of laser scanning in the everyday work of archaeologists.\n\n<p>\nThe first topic is the recoloring of point clouds that are combined from several different scan positions. The photographs taken at the different scan positions all exhibit different colorizations of the objects in the scene, as the lighting conditions usually vary from scan position to scan position. We want to develop methods that can provide an artifact free colorized model, independent of the lighting conditions that prevailed during laser scanning. The second topic deals with the management of large point clouds. It incorporates compression of the point cloud data, where the data shall nevertheless remain editable, and the display of large data. Several compression techniques for different point attributes, lossless as well as lossy compression, and fitting points to higher order primitives will be used for compression depending on the further use of the data. To display the data, an eye-tracker can be used to increase the visual experience of the user. The third topic deals with the distribution of data over a network, and navigating through the data, so that users can get access to large point-based models of cultural heritage sites with intuitive navigation capabilities.\n<p>\nCultural Heritage is a growing market that needs intensive support from enabling technologies like real-time computer graphics and 3D object reconstruction. Especially the preservation and presentation of archaeological or historical items (architecture, artifacts) are of high importance there. Austria is well known in the tourism industry because of its cultural values. Preserving and presenting such cultural values not only physically but also in digital form, will help to distribute these values also using new media channels to the broad public.\nAny improvement of the technology that helps to preserve cultural items and allows presenting such items and scientific findings to a large audience will improve the value of these cultural items and in the same way boost the tourism industry.\n<p>\nEspecially in Austria there is a huge \"creative industries\" community, small and medium companies working on innovative presentations of cultural goods. Especially these companies will profit from the technology that will be developed in this project. One of these \"creative industries\" companies is Imagination, partner in this project and pioneer in cultural heritage presentations and preservation software.",
        "start_date": "2010-01-01",
        "end_date": "2012-12-31",
        "leader_id": 193,
        "logo": {
            "name": "logo.png",
            "path": "project:TERAPOINTS",
            "type": "image/png",
            "size": 1100956,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/TERAPOINTS/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [
            {
                "id": "FFF",
                "contract_number": "825842",
                "comment": "FIT-IT Visual Computing"
            }
        ],
        "research_areas": [
            "Geometry",
            "Rendering"
        ],
        "publications": [
            "arikan-2013-osn",
            "arikan-thesis",
            "Hebart-2011-ColorAdjustment",
            "leimer-2013-esopc",
            "marek-2011-normalest",
            "mayer_2011_cipa",
            "Mayer-2010-VT",
            "mazza-2012-bakk",
            "preiner_2012_AS",
            "preiner11IR",
            "probst_kolesik_2010_pk10",
            "scheiblauer-2011-cag",
            "scheiblauer-2011-chnt",
            "scheiblauer-2012-chnt",
            "scheiblauer-2013-wscg",
            "SCHEIBLAUER-2015-WFC",
            "scheiblauer-thesis",
            "TR-186-2-12-01",
            "Tragust-2012-GMHPC",
            "Tragust-2014-master-thesis",
            "trumpf_stefan_2012"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/TERAPOINTS",
        "__class": "Project"
    },
    {
        "id": "GAMEWORLD",
        "workgroup_id": "rend",
        "drupal_id": 4238,
        "drupal_path": "/research/projects/GAMEWORLD",
        "name": "GAMEWORLD - Procedural Worlds for Games",
        "name_de": null,
        "short_title": "GAMEWORLD",
        "website": null,
        "status": "active",
        "short_abstract": "The aim of the GAMEWORLD project is to procedurally generate cities, villages and other game environments mainly to be used in tomorrow's video games.",
        "abstract": "The aim of the GAMEWORLD project is to procedurally generate cities, villages and other game environments mainly to be used in tomorrow's video games.\n<p>\nTypically, 3D game and virtual environments are created using off-the-shelf software like Autodesk Maya or MAX. While these tools allow professional artists to produce very high-quality models, this task is becoming more and more labour intensive and time consuming, and therewith expensive, as the detail expected from virtual worlds is continuously increasing. This is one of the most important problems in game development, so that finding suitable automatic modelling techniques has become somewhat of a holy grail of the game industry.\n<p>\nIn the Gameworld project, we aim to facilitate and automate the process of generating interactive 3D models, while still giving the designer a high level of control over the visual and architectural style of the created models. We will go beyond modelling building facades, and cover more general objects, including street networks and green spaces, urban furniture, indoor environments, and semantic gameplay annotations.\n<p>\nIn order to achieve this goal, we will use sophisticated procedural design grammars, precursors to which have already been developed at the <a href=\"/\">Vienna University of Technology</a> and the <a href=\"http://prism.asu.edu/\">Arizona State University</a>. We expect this approach to lead to tools that can be applied in production environments, significantly reducing the effort required to create interactive worlds. Potential application areas of this technology are not limited to video games, but include any field using virtual environments.\n<p>\nThe project team combines scientific and industrial experience in an ideal way. The academic partners have already been cooperating on research into procedural modelling techniques previously. <a href=\"http://www.sproing.com\">Sproing</a> is Austria's leading video and computer game development studio, and will bring its long standing experience and knowledge about the needs of the game development industry to the table.\n",
        "start_date": "2007-09-01",
        "end_date": "2012-08-31",
        "leader_id": 193,
        "logo": {
            "name": "logo.png",
            "path": "project:GAMEWORLD",
            "type": "image/png",
            "size": 602918,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/GAMEWORLD/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [
            {
                "id": "FFF",
                "contract_number": "813387",
                "comment": "FIT-IT Visual Computing"
            }
        ],
        "research_areas": [
            "Modeling"
        ],
        "publications": [
            "dusberger-2010-msm",
            "fiedler-2009-php",
            "GEBHART-2008-AGV",
            "Habel_09_PGT",
            "Habel-2010-EIN",
            "ilcik-2010-ps",
            "ilcik-2011-pmous",
            "kuehtreiber-2010-ikph",
            "lipp_markus-2010-DAC",
            "LIPP-2007-CGA",
            "LIPP-2008-IEV",
            "LIPP-2009-PGL",
            "LIPP-2009-PGL2",
            "LIPP-2010-PGMS",
            "lipp2011a",
            "MOELLINGER-2010-SD",
            "NIEDERREITER-2010-SIM",
            "Purgathofer-2009-rus",
            "RADAX-2010-LOD",
            "scharl-master-thesis",
            "unterguggenberger-2010-cmph"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/GAMEWORLD",
        "__class": "Project"
    },
    {
        "id": "Landspotting",
        "workgroup_id": "rend",
        "drupal_id": 4240,
        "drupal_path": "/research/projects/Landspotting",
        "name": "Landspotting - Creating Games for Improving Land Cover",
        "name_de": null,
        "short_title": "Landspotting",
        "website": null,
        "status": "active",
        "short_abstract": "Social gaming to collect vast amounts of data for satellite validation ",
        "abstract": "At present there is no single satellite-derived global land cover product that is accurate enough to provide reliable estimates of forest or cropland area to determine, for example, how much additional land is available to grow biofuels or to tackle problems of food security. This project aims to improve the quality of this land cover information by vastly increasing the amount of in-situ validation data available for calibration and validation of satellite-derived land cover. \n\nThe Geo-Wiki (Geo-Wiki.org) system currently allows users to compare three satellite derived land cover products and validate them using Google Earth. However, there is presently no incentive for anyone to provide this data so the amount of validation through Geo-Wiki has so far been quite limited. The LandSpotting project will take a truly innovative approach by the addition of crowdsourcing through the development of a game. The game will engage users whilst simultaneously collecting a large amount of in-situ land cover information. The development of the game will be informed by the current raft of successful social gaming that is available on the internet and as mobile applications, many of which are geo-spatial in nature.\n\nAt the same time, the Geo-Wiki system will be modified to exploit the latest available satellite images and to use the acquired in-situ validation information to create new outputs: a hybrid land cover map, which takes the best information from each individual product to create a single integrated version; a database of validation points that will be freely available to the land cover user community; and a facility that allows users to create a specific targeted validation area, which will then be provided to the crowdsourcing community for validation. These outputs will turn Geo-Wiki into a valuable system for many users of land cover. \n",
        "start_date": "2011-02-01",
        "end_date": "2012-07-31",
        "leader_id": 193,
        "logo": {
            "name": "logo.png",
            "path": "project:Landspotting",
            "type": "image/png",
            "size": 9822,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/Landspotting/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [
            {
                "id": "FFF",
                "contract_number": "2008999",
                "comment": null
            }
        ],
        "research_areas": [],
        "publications": [
            "Fritz-2012",
            "pangerl_2013_GPD",
            "STURN-2013-LGI",
            "STURN-2013-LSI"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/Landspotting",
        "__class": "Project"
    },
    {
        "id": "GPV",
        "workgroup_id": "rend",
        "drupal_id": 4242,
        "drupal_path": "/research/projects/GPV",
        "name": "General Purpose Visibility",
        "name_de": null,
        "short_title": "GPV",
        "website": null,
        "status": "active",
        "short_abstract": "<p>Visibility culling is a fundamental problem of computer graphics, and is of crucial importance for many applications, like game development or architectural design. For example, interactively rendering a model containing hundreds of millions of polygons like the Boeing 777 model shown on the cover page is only possible when invisible parts of the model are \"culled\" away. Solving the visibility problem has been an important research topic for many years, and countless methods have been proposed. Amazingly, most approaches still have more or less serious issues that prevent their widespread use. Preprocessed visibility solutions attempt to solve the problem in an offline step, but are often slow, lack robustness, and are hard to implement. Online culling algorithms have a lot of potential since they can be used on arbitrary scenes, but they induce an overhead during rendering which is unacceptable for applications that strive for optimal performance. We believe that basic research problems remain unsolved until this day for preprocessed visibility as well as for online culling, and the goal of this project is to tackle and solve the majority of these problems. In order to do this, we have to gain more insight into the complex properties of visibility, which are still not entirely understood. Our proposed methods have applications in various fields, like game development, architectural design, urban visualization, or massive model visualization.</p>\r\n",
        "abstract": "<p>Visibility culling is a fundamental problem of computer graphics, and is of crucial importance for many applications, like game development or architectural design. For example, interactively rendering a model containing hundreds of millions of polygons like the Boeing 777 model shown on the cover page is only possible when invisible parts of the model are \"culled\" away. Solving the visibility problem has been an important research topic for many years, and countless methods have been proposed. Amazingly, most approaches still have more or less serious issues that prevent their widespread use. Preprocessed visibility solutions attempt to solve the problem in an offline step, but are often slow, lack robustness, and are hard to implement. Online culling algorithms have a lot of potential since they can be used on arbitrary scenes, but they induce an overhead during rendering which is unacceptable for applications that strive for optimal performance. We believe that basic research problems remain unsolved until this day for preprocessed visibility as well as for online culling, and the goal of this project is to tackle and solve the majority of these problems. In order to do this, we have to gain more insight into the complex properties of visibility, which are still not entirely understood. Our proposed methods have applications in various fields, like game development, architectural design, urban visualization, or massive model visualization.</p>\r\n",
        "start_date": "2008-05-01",
        "end_date": "2011-04-30",
        "leader_id": 193,
        "logo": {
            "name": "logo.png",
            "path": "project:GPV",
            "type": "image/png",
            "size": 335472,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/GPV/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [
            {
                "id": "fwf",
                "contract_number": "P21130-N13",
                "comment": null
            }
        ],
        "research_areas": [
            "Rendering"
        ],
        "publications": [
            "beham_2009.2009-EVS",
            "bernhard-2010-gph",
            "bernhard-2011-bmtf",
            "bernhard-2011-maicg",
            "BITTNER-2009-AGVS",
            "BITTNER-2009-GEFOC",
            "bittner-2011-scc",
            "druml-2010-PFL",
            "ERNST-2010-IOOC",
            "knecht-2009-TAS",
            "matt2011",
            "mattausch-2010-tao",
            "mattausch-2010-var",
            "Mattausch-2010-vcr",
            "MAYER-2010-MTX",
            "praeauer-2011-et",
            "radax-2010-RTL",
            "radits_2011_gahdr",
            "REINALTER_2010_VIS",
            "scherzer2011c",
            "SSMW09",
            "sundstedt-2013-vag",
            "WALLNER-2010-CSSAO",
            "WIMMER-2009-VCCG",
            "ZL_2011_IVIG"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/GPV",
        "__class": "Project"
    },
    {
        "id": "Sim-CT",
        "workgroup_id": "vis",
        "drupal_id": 4244,
        "drupal_path": "/research/projects/Sim-CT",
        "name": "Sim-CT: Simulation of an industrial 3D computer tomograph",
        "name_de": null,
        "short_title": "Sim-CT",
        "website": null,
        "status": "active",
        "short_abstract": "Die Röntgen-Computertomografie (CT) ist eine sehr gut etablierte Methode in der Medizin, die in den letzten Jahren immer mehr in der Materialforschung und Industrie zum Einsatz kommt. CT ist eine zerstörungsfreie Methode, um Bauteile 3-dimensional zu vermessen und um versteckte Fehler (Risse, Verunreinigungen, Poren,.) in der Tiefe eines Werkstoffes zu detektieren. Bei einem modernen 3D-CT System mit einem a-Si- Matrixdetektor (amorphes Silizium) wird die Qualität der Ergebnisse erheblich von den Messparametern (Röhrenstrom- und Energie, Filter, Position des Messobjektes,.) bestimmt. Diese Einflußgrößen äußern sich in Form von mehr oder weniger stark auftretenden Messartefakten, die das Ergebnis verfälschen können. Bis zu einem gewissen Grad können Artefakte durch Korrekturalgorithmen verringert werden. Diese Korrekturverfahren sind nach wie vor Gegenstand der aktuellen Forschung und sind für 3DCT- Systeme noch nicht in der Praxis brauchbar. Ziel dieses Projekts ist es, Grundlagen für eine vollständige Simulation eines industriellen 3DCTs zu erarbeiten, wenn Geometrie und Material des Messobjektes bekannt sind. Damit können schon vor der CT-Messung die optimalen Messparameter bestimmt und die Artefaktkorrektur durchgeführt werden. Die Projektergebnisse führen zu folgenden Vorteilen: - Besseres Verständnis über das industrielle 3D-CT-System und der Wechselwirkung der Strahlung mit den Einzelkomponenten (insbesondere Werkstück und Detektor) - Verbesserung der CT-Messergebnisse, Reduktion von Artefakten und Fehlmessungen - Ausweitung der Anwendungsfelder von 3D-CT, Erhöhung der Messgenauigkeit",
        "abstract": "Die Röntgen-Computertomografie (CT) ist eine sehr gut etablierte Methode in der Medizin, die in den letzten Jahren immer mehr in der Materialforschung und Industrie zum Einsatz kommt. CT ist eine zerstörungsfreie Methode, um Bauteile 3-dimensional zu vermessen und um versteckte Fehler (Risse, Verunreinigungen, Poren,.) in der Tiefe eines Werkstoffes zu detektieren. Bei einem modernen 3D-CT System mit einem a-Si- Matrixdetektor (amorphes Silizium) wird die Qualität der Ergebnisse erheblich von den Messparametern (Röhrenstrom- und Energie, Filter, Position des Messobjektes,.) bestimmt. Diese Einflußgrößen äußern sich in Form von mehr oder weniger stark auftretenden Messartefakten, die das Ergebnis verfälschen können. Bis zu einem gewissen Grad können Artefakte durch Korrekturalgorithmen verringert werden. Diese Korrekturverfahren sind nach wie vor Gegenstand der aktuellen Forschung und sind für 3DCT- Systeme noch nicht in der Praxis brauchbar. Ziel dieses Projekts ist es, Grundlagen für eine vollständige Simulation eines industriellen 3DCTs zu erarbeiten, wenn Geometrie und Material des Messobjektes bekannt sind. Damit können schon vor der CT-Messung die optimalen Messparameter bestimmt und die Artefaktkorrektur durchgeführt werden. Die Projektergebnisse führen zu folgenden Vorteilen: - Besseres Verständnis über das industrielle 3D-CT-System und der Wechselwirkung der Strahlung mit den Einzelkomponenten (insbesondere Werkstück und Detektor) - Verbesserung der CT-Messergebnisse, Reduktion von Artefakten und Fehlmessungen - Ausweitung der Anwendungsfelder von 3D-CT, Erhöhung der Messgenauigkeit",
        "start_date": "2006-10-01",
        "end_date": "2010-06-30",
        "leader_id": 166,
        "logo": null,
        "funding_organisations": [
            {
                "id": "FFF",
                "contract_number": "812136-SCK/KUG (Bridge project)",
                "comment": null
            }
        ],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/Sim-CT",
        "__class": "Project"
    },
    {
        "id": "COMRADE",
        "workgroup_id": "vis",
        "drupal_id": 4254,
        "drupal_path": "/research/projects/COMRADE",
        "name": "COMRADE: Colonoscopic and Orthopaedic Magnetic Resonance Analysis, Diagnosis and Evaluation",
        "name_de": null,
        "short_title": "COMRADE",
        "website": null,
        "status": "active",
        "short_abstract": "The COMRADE (MRI based Visualization and Analysis for Virtual Colonoscopy and Orthopaedics) project aims to explore the possibilities of MRI visualization techniques, e.g., for virtual colonoscopy and orthopaedics, due to similarity in their interests, and to develop a patient-friendly, effective screening tool.",
        "abstract": "The COMRADE (MRI based Visualization and Analysis for Virtual Colonoscopy and Orthopaedics) project aims to explore the possibilities of MRI visualization techniques, e.g., for virtual colonoscopy and orthopaedics, due to similarity in their interests, and to develop a patient-friendly, effective screening tool. The work concentrates on identifying the specific application areas, e.g., diagnostic investigation of Crohn's disease, and improving still ineffective analysis steps of the visualization pipeline, such as data enhancement, mapping to visual properties, etc. ",
        "start_date": "2003-06-01",
        "end_date": "2009-12-31",
        "leader_id": 179,
        "logo": {
            "name": "logo.png",
            "path": "project:COMRADE",
            "type": "image/png",
            "size": 33123,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/COMRADE/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [
            {
                "id": "pms",
                "contract_number": null,
                "comment": null
            }
        ],
        "research_areas": [],
        "publications": [
            "Mlejnek_2004_PF",
            "Mlejnek_2005_AOEPF",
            "Mlejnek_2006_MVOA",
            "Mlejnek-2004-Int",
            "Mlejnek-2004-ITVAC",
            "termeer-2006-000",
            "termeer-2007-covicad",
            "termeer-2008-scmr",
            "termeer-2008-vis",
            "termeer-2009-cvc",
            "termeer-2009-scmr",
            "TR-186-2-08-05",
            "TR-186-2-08-11"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/COMRADE",
        "__class": "Project"
    },
    {
        "id": "diagvis",
        "workgroup_id": "vis",
        "drupal_id": 4256,
        "drupal_path": "/research/projects/diagvis",
        "name": "DiagVis: Diagnostic Visualization for Medical Applications",
        "name_de": null,
        "short_title": "diagvis",
        "website": null,
        "status": "active",
        "short_abstract": "The DiagVis (Diagnostic Visualization) project aims to facilitate and to improve medical diagnostics. The project focuses on radiological needs but also on related disciplines like surgical applications. ",
        "abstract": "The DiagVis (Diagnostic Visualization) project aims to facilitate and to improve medical diagnostics. The project focuses on radiological needs but also on related disciplines like surgical applications. \tReliability, stability and usability of the developed methods should be examined through the integration to a medical workstation to allow a clinical evaluation.",
        "start_date": "2006-01-01",
        "end_date": "2009-12-31",
        "leader_id": 166,
        "logo": {
            "name": "logo.png",
            "path": "project:diagvis",
            "type": "image/png",
            "size": 96193,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/diagvis/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [
            {
                "id": "agfa",
                "contract_number": null,
                "comment": null
            }
        ],
        "research_areas": [],
        "publications": [
            "bruckner-2008-IVV",
            "haidacher-2008-vcbm",
            "Kohlmann-2007-EBV",
            "kohlmann-2007-livesync",
            "kohlmann-2008-lse",
            "kohlmann-2009-cp",
            "kohlmann-2009-lssl",
            "TR-186-2-06-04",
            "TR-186-2-07-11",
            "TR-186-2-08-01",
            "TR-186-2-08-04",
            "TR-186-2-08-10",
            "TR-186-2-08-14"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/diagvis",
        "__class": "Project"
    },
    {
        "id": "exvisation",
        "workgroup_id": "vis",
        "drupal_id": 4248,
        "drupal_path": "/research/projects/exvisation",
        "name": "ExVisation: Expressive Visualization of Volumetric Data",
        "name_de": null,
        "short_title": "exvisation",
        "website": null,
        "status": "active",
        "short_abstract": "<p>To develop novel methods for automatically generating expressive visualizations of complex volumetric data.</p>\r\n",
        "abstract": "<p>To develop novel methods for automatically generating expressive visualizations of complex volumetric data.</p>\r\n",
        "start_date": "2005-08-01",
        "end_date": "2009-12-31",
        "leader_id": 171,
        "logo": {
            "name": "logo.png",
            "path": "project:exvisation",
            "type": "image/png",
            "size": 13153,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/exvisation/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [
            {
                "id": "fwf",
                "contract_number": "P 18322-N04",
                "comment": null
            }
        ],
        "research_areas": [],
        "publications": [
            "bruckner-2005-AIV",
            "bruckner-2005-vid",
            "bruckner-2005-VIS",
            "bruckner-2006-EVV",
            "bruckner-2006-ICE",
            "bruckner-2007-EDF",
            "bruckner-2007-STF",
            "bruckner-2008-IIV",
            "bruckner-2009-IVV",
            "diss-thesis-bratislava",
            "diss-thesis-magdeburg",
            "diss-thesis-siegen",
            "eg-tut2005-iv",
            "Marek06",
            "Rautek-2007-O3D",
            "Rautek-2007-SLI",
            "Rautek-2008-IDS",
            "Rautek-2008-kav",
            "Rautek-2008-VF",
            "rautek-2009-vmv",
            "Rautek06Vis",
            "Rautek06VMLS",
            "ruiz-2008-OVR",
            "ruiz-2008-SEV",
            "TR-186-2-05-06",
            "TR-186-2-05-07",
            "TR-186-2-05-08",
            "TR-186-2-06-01",
            "TR-186-2-06-02",
            "TR-186-2-06-03",
            "TR-186-2-07-02",
            "TR-186-2-07-04",
            "TR-186-2-07-05",
            "TR-186-2-07-10",
            "tut-eg-2006",
            "tut-siggraph-2006",
            "tut-vis-2006",
            "tut-vis-2007",
            "Viola-05-Smart",
            "viola-2006-FoA",
            "viola-popular-article-2006",
            "Viola-vistutillustrativevis",
            "vis-foa",
            "xmastree2005"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/exvisation",
        "__class": "Project"
    },
    {
        "id": "Scanopy",
        "workgroup_id": "rend",
        "drupal_id": 4246,
        "drupal_path": "/research/projects/Scanopy",
        "name": "SCANOPY - Scan Data Organisation, Processing and Display",
        "name_de": null,
        "short_title": "Scanopy",
        "website": null,
        "status": "active",
        "short_abstract": "<p>The aim of the Scanopy project is to provide efficient algorithms for working with 3D laser scan data. The first main challenge is to improve the quality of raw scan data via filtering, scan completion and repair, making use of image information and symmetries. The second challenge is to develop efficient data structures and algorithms to allow displaying huge point-based models. The third challenge is to improve the appearance of the models via lighting and relighting.</p>\r\n",
        "abstract": "<p>The aim of the Scanopy project is to provide efficient algorithms for working with 3D laser scan data. The first main challenge is to improve the quality of raw scan data via filtering, scan completion and repair, making use of image information and symmetries. The second challenge is to develop efficient data structures and algorithms to allow displaying huge point-based models. The third challenge is to improve the appearance of the models via lighting and relighting.</p>\r\n",
        "start_date": "2006-10-01",
        "end_date": "2009-12-31",
        "leader_id": 193,
        "logo": null,
        "funding_organisations": [
            {
                "id": "FFF",
                "contract_number": "812126",
                "comment": null
            }
        ],
        "research_areas": [
            "Rendering"
        ],
        "publications": [
            "BARSUKOV-2010-NEPC",
            "Bogner-2008-MTh",
            "PLUCH-2009-ANP",
            "pr10",
            "preiner_2010_GIPC",
            "PREINER-2009-GIPC",
            "Scheiblauer-2008-DCW",
            "SCHEIBLAUER-2009-IDCE",
            "WIMMER-2006-DWN",
            "WIMMER-2006-IP"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/Scanopy",
        "__class": "Project"
    },
    {
        "id": "Visual Computing",
        "workgroup_id": "rend",
        "drupal_id": 4250,
        "drupal_path": "/research/projects/Visual-Computing",
        "name": "Visual Computing in Fundamental, Academic and Applied Science and Research",
        "name_de": null,
        "short_title": "Visual Computing",
        "website": null,
        "status": "active",
        "short_abstract": "Visual Computing is an emerging field of research, formed by the progressive fusion of traditionally separate scientific fields. Visual Computing deals with problems that are focused on processing or acquisition of graphical data. Most visual computing researchers work in the fields of computer graphics, digital image processing, pattern recognition, visualization and virtual reality. Since many new technologies and problems require expertise in several of these fields, research groups increasingly look for opportunities to improve their cooperation with neighboring fields. Austrian, as well as Russian research groups have ample experience in several of the fields that comprise visual computing, and have published high level results of their research. Following a successful seminar in Vienna last year, the Joint Seminar on Visual Computing in Fundamental, Academic and Applied Science and Research would give an opportunity for researchers of both countries to present their results to a larger audience of experts, to identify common problems and to participate in an open dialogue that may form the basis for future cooperative projects. The Seminar will mainly consist of presentations from all participants, dealing with topics from all areas of visual computing. Additionally, the seminar will include several keynote speakers, research facility tours and social events.",
        "abstract": "Visual Computing is an emerging field of research, formed by the progressive fusion of traditionally separate scientific fields. Visual Computing deals with problems that are focused on processing or acquisition of graphical data. Most visual computing researchers work in the fields of computer graphics, digital image processing, pattern recognition, visualization and virtual reality. Since many new technologies and problems require expertise in several of these fields, research groups increasingly look for opportunities to improve their cooperation with neighboring fields. Austrian, as well as Russian research groups have ample experience in several of the fields that comprise visual computing, and have published high level results of their research. Following a successful seminar in Vienna last year, the Joint Seminar on Visual Computing in Fundamental, Academic and Applied Science and Research would give an opportunity for researchers of both countries to present their results to a larger audience of experts, to identify common problems and to participate in an open dialogue that may form the basis for future cooperative projects. The Seminar will mainly consist of presentations from all participants, dealing with topics from all areas of visual computing. Additionally, the seminar will include several keynote speakers, research facility tours and social events.",
        "start_date": "2009-08-01",
        "end_date": "2009-10-31",
        "leader_id": 190,
        "logo": null,
        "funding_organisations": [
            {
                "id": "fwf",
                "contract_number": null,
                "comment": null
            }
        ],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/Visual-Computing",
        "__class": "Project"
    },
    {
        "id": "PVG",
        "workgroup_id": "vis",
        "drupal_id": 4252,
        "drupal_path": "/research/projects/PVG",
        "name": "PVG: Point-based Volume Graphics",
        "name_de": null,
        "short_title": "PVG",
        "website": null,
        "status": "active",
        "short_abstract": "<p>To implement and develop novel methods for reconstruction and visualisation of 3D Data in different grid structures</p>\r\n",
        "abstract": "<p>To implement and develop novel methods for reconstruction and visualisation of 3D Data in different grid structures</p>\r\n",
        "start_date": "2006-09-01",
        "end_date": "2009-09-30",
        "leader_id": 166,
        "logo": {
            "name": "logo.png",
            "path": "project:PVG",
            "type": "image/png",
            "size": 20147,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/PVG/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [
            {
                "id": "fwf",
                "contract_number": "P 18547-N04",
                "comment": null
            }
        ],
        "research_areas": [],
        "publications": [
            "Buerger-2007-cmv",
            "Buerger-2007-eurovis",
            "buerger-2007-star",
            "fuchs_raphael_2007_par",
            "fuchs-2008-del",
            "fuchs-vortex",
            "heinzl_2006_RSDVCDM",
            "heinzl-2006-rep",
            "RAUTEK06",
            "toth-2007-ndd",
            "TR-186-2-07-06",
            "TR-186-2-08-02",
            "TR-186-2-08-08",
            "TR-186-2-09-01",
            "vucini_2008_rnp",
            "vucini_2009",
            "vucini_erald-2007-FRI",
            "vucini-2009-phd"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/PVG",
        "__class": "Project"
    },
    {
        "id": "d4328",
        "workgroup_id": "vis",
        "drupal_id": 4328,
        "drupal_path": "/research/projects/DiagVis-0",
        "name": "DiagVis\r\n2. year",
        "name_de": "DiagVis\r\nJahr 2 (KE 2009 offen)",
        "short_title": "DiagVis",
        "website": null,
        "status": "active",
        "short_abstract": null,
        "abstract": "Das DiagVis-Projekt bezieht sich auf wissenschaftliche Forschung und Entwicklung von innovativen Computergraphikmethoden zur Erleichterung und Verbesserung medizinischer Diagnostik.",
        "start_date": "2007-10-01",
        "end_date": "2009-02-28",
        "leader_id": 166,
        "logo": null,
        "funding_organisations": [],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/DiagVis-0",
        "__class": "Project"
    },
    {
        "id": "CROSSMOD",
        "workgroup_id": "rend",
        "drupal_id": 4259,
        "drupal_path": "/research/projects/CROSSMOD",
        "name": "Cross-Modal",
        "name_de": null,
        "short_title": "CROSSMOD",
        "website": null,
        "status": "active",
        "short_abstract": "The CROSSMOD (Cross-Modal Perceptual Interaction and Rendering) project studies the effects that the visual and audio channels have on each other and exploits these relations to improve rendering speed",
        "abstract": "Virtual environments (VEs) play an increasingly important role in our society. Currently two main\nsensorial channels are exploited in VEs: visual and auditory, although sound is greatly underused.\nThe ever-increasing scene complexity of VEs means that it is currently not possible to display\nhighly realistic scenes in real time despite the availability of modern high-performance graphics and\naudio processors. However, the realism and quality of a virtual image/sound needs to be as good as\nwhat the user can perceive: we only need to display what is necessary. Despite recent research in\npsychology, little work exists on cross-modal effects, i.e., the effects that each channel (visual and\nauditory) has on the other, to improve the efficiency and quality of VEs. CROSSMOD will study\nthese effects and develop a better understanding of how perceptual issues affect auditory/visual\ndisplay; this understanding will lead to the development of novel algorithms for selectively\nrendering VEs. The cross-modal effects studied will include the effect of spatial/latency congruence\non quality perception, attention-control, sound-induced changes in visual perception and\nfoveal/peripheral audiovisual effects. The research will be guided by its applicability to the\nimprovement of VE display and VE authoring. The solutions developed by CROSSMOD will\nenable the display of perceptually highly realistic environments in real time even for very complex\nscenes, as well as the use of active cross-modal effects such as attention control in the display and\nauthoring of VEs. An integrated software cross-modal manager will be developed, using results of\nexperiments identifying which cross-modal effects are useful for VE display. \nWe will evaluate our\napproach on three target applications: computer games, design/architecture and clinical psychiatry,\nusing platforms adapted to each application: PCs with audio/graphics cards, a projector-enhanced\n“personal VE” setup and large-screen immersive VE systems.",
        "start_date": "2005-12-01",
        "end_date": "2008-12-31",
        "leader_id": 193,
        "logo": {
            "name": "logo.png",
            "path": "project:CROSSMOD",
            "type": "image/png",
            "size": 24255,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/CROSSMOD/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [
            {
                "id": "EU_IST_6th",
                "contract_number": null,
                "comment": null
            }
        ],
        "research_areas": [
            "Perception"
        ],
        "publications": [
            "bernhard-2011-bmtf",
            "GRELAUD-2009-EPA",
            "labschuetz-2010-bms",
            "labschuetz-2011-rsr",
            "mattausch-2008-CHC",
            "Scherzer-2007-PCS",
            "SCHERZER-2008-FSR",
            "SUNDSTEDT-2008-ASF"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/CROSSMOD",
        "__class": "Project"
    },
    {
        "id": "NeuroViewer",
        "workgroup_id": "vis",
        "drupal_id": 4257,
        "drupal_path": "/research/projects/NeuroViewer",
        "name": "NeuroViewer: interactive visualization and exploration framework for neural networks of fruit fly brains",
        "name_de": null,
        "short_title": "NeuroViewer",
        "website": null,
        "status": "active",
        "short_abstract": "Goal of this project is to provide to the Research Institute of Molecular Pathology (IMP) an interactive visualization and exploration framework for neural networks of fruit fly brains, in the following called \"NeuroViewer\".",
        "abstract": null,
        "start_date": "2008-04-01",
        "end_date": "2008-12-31",
        "leader_id": 166,
        "logo": null,
        "funding_organisations": [],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/NeuroViewer",
        "__class": "Project"
    },
    {
        "id": "LEOPOLD",
        "workgroup_id": "External",
        "drupal_id": 4261,
        "drupal_path": "/research/projects/LEOPOLD",
        "name": "Lively Experience of the Past of Leopoldsberg from Digital Archaeological Data ",
        "name_de": null,
        "short_title": "LEOPOLD",
        "website": null,
        "status": "active",
        "short_abstract": "The main objective of this project is to research and develop tools to make abstract, digital archaeological and historical data perceivable for the public using mixed reality edutainment systems. Another goal is the development of a Harris Matrix editor to manage stratigraphic relations of an excavation site and use it to retrieve digital archaeological data.",
        "abstract": "<p>The main objective of this project is to research and develop tools to make abstract, digital archaeological and historical data perceivable for the public using mixed reality technology. Pastime events will be experienced in an interactive, multimodal way, simultaneously addressing more senses. For that purpose three mixed reality edutainment installations that mediate concrete historical facts will be constructed and evaluated. The pedagogical goal is to provoke the interest of users by surprising and exciting them. The installations are seamlessly embedded on the historical site and allow a blended experience of presence and pastime. The content and the story for the mixed reality installations are thereby directly derived from digital archaeological and historical data. The background for the story will be certain events or periods in history related to the Leopoldsberg in Vienna. They range from the age of the Celts to the end of World War II. Another research goal is the development of a tool to manage stratigraphic relations of an excavation site more efficiently. It is called Stratigraphic Sequence Composer or SSC for short and is used to build up and administer a representation of an archaeological stratification in form of a sequential diagram or Harris Matrix. It is planned to provide an interface to a data base and a GIS system, so that digital archaeological data can be directly accessed by selecting nodes of the Harris Matrix.</p>\r\n",
        "start_date": "2006-12-01",
        "end_date": "2008-11-30",
        "leader_id": 224,
        "logo": null,
        "funding_organisations": [
            {
                "id": "WWTF",
                "contract_number": null,
                "comment": null
            }
        ],
        "research_areas": [],
        "publications": [
            "TRA08",
            "TRA08b",
            "traxler-2009-dhm"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/LEOPOLD",
        "__class": "Project"
    },
    {
        "id": "ADAPT",
        "workgroup_id": "vis",
        "drupal_id": 4263,
        "drupal_path": "/research/projects/ADAPT",
        "name": "ADAPT: Advanced Diagnosis, Analysis and Planning Tools (in Medicine)",
        "name_de": null,
        "short_title": "ADAPT",
        "website": null,
        "status": "active",
        "short_abstract": "<p>A considerable improvement in the area of volume visualisation and data acquisition was achieved during the last years. This project will take advantage of the high computational power of today's PC Workstations and highly sophisticated algorithms in order to improve diagnosis.</p>\r\n",
        "abstract": "<p>A considerable improvement in the area of volume visualisation and data acquisition was achieved during the last years. This project will take advantage of the high computational power of today's PC Workstations and highly sophisticated algorithms in order to improve diagnosis.</p>\r\n",
        "start_date": "2001-11-01",
        "end_date": "2007-12-31",
        "leader_id": 162,
        "logo": {
            "name": "logo.png",
            "path": "project:ADAPT",
            "type": "image/png",
            "size": 5678,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/ADAPT/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [
            {
                "id": "FFF",
                "contract_number": null,
                "comment": null
            }
        ],
        "research_areas": [],
        "publications": [
            "bruckner-2004-EVV",
            "bruckner-2005-ICV",
            "coto-2005-MAC",
            "Csebfalvi-2002-FPVF",
            "Csebfalvi-2002-SBICG",
            "Csebfalvi-2002-Smo",
            "facial2004",
            "grimm-2004-arefined",
            "GRIMM-2004-FDMX-P",
            "grimm-2004-memory",
            "GRIMM-2004-PPC",
            "grimm-2004-volume",
            "groeller-2005-dia",
            "Hladuvka-2002-Exp",
            "Kanitsar-2002-CPR",
            "Kanitsar-2003-Adva",
            "Kanitsar-2003-Dem",
            "Kanitsar-2004-Dia",
            "Kanitsar-thesis",
            "knapp_michael_2004_MAS",
            "knapp-2004-semi",
            "Neumann-2002-Fea",
            "phd-viola",
            "TR-186-2-05-01",
            "TR-186-2-05-02",
            "TR-186-2-05-03",
            "vessel2003",
            "Viola-2003-GPU",
            "Viola-2003-Har",
            "Viola-2003-NON",
            "Viola-2004-GPU",
            "viola-2004-har",
            "viola-2004-har2",
            "viola-2004-idv",
            "viola-2004-imp",
            "Viola-2004-ImpX",
            "Viola-2004-ImpX2",
            "viola-2005-imp",
            "xmastree2002"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/ADAPT",
        "__class": "Project"
    },
    {
        "id": "EU U-CREATE",
        "workgroup_id": "rend",
        "drupal_id": 4265,
        "drupal_path": "/research/projects/EU-U-CREATE",
        "name": "EU U-CREATE, Creative Authoring tools for Edutainment Applications",
        "name_de": null,
        "short_title": "EU U-CREATE",
        "website": null,
        "status": "active",
        "short_abstract": "U-CREATE is initiated by Alterface, Imagination and ion2s, three SMEs which are primarily active in the field of edutainment, i.e. the joining of education and entertainment (customers are museums, cultural institutions, entertainment parks…) They share a common and important problem: efficient content creation. Be it interactive setups, Mixed Reality experiences, location-based services, all these technologies are worthless without content: content is always to be tackled or delivered at the same time as technology. However, content creation is a long process that can turn to nightmare when implementing large-scale projects. The solution is two words: authoring tool. A powerful, graphical, beyond the state-of-the-art authoring tool is needed that allows one to create elaborated contents in a fast and easy way. No such tool exists to date due to the highly innovative products commercialized by the SMEs. Such a tool will be created by the project. The authoring tool will increase competitiveness, because it significantly shortens production time (50% reduction of integration time) and effort (creation process affordable to non-specialists) for content development. It will also enable other people to create contents for the intended systems: SMEs can then sell more software while subcontracting or licensing the content production. It will also strengthen the European position in an authoring market dominated by US companies. SMEs alone cannot afford such a task, in terms of expertise but also in terms of resources. This project gathers the highly-specialized expertise from ZGDV, TUW and DIST which allows for the delivery of a prototype authoring tool. HadroNet will be the end-user serving the consortium and helping it to gather a larger community of end-users, in order to assess requirements, validate results and construct the basis of a commercial distribution system. \nDoing so, the project will set the first basis of a longer-term collaboration amongst all partners.",
        "abstract": "U-CREATE is initiated by Alterface, Imagination and ion2s, three SMEs which are primarily active in the field of edutainment, i.e. the joining of education and entertainment (customers are museums, cultural institutions, entertainment parks…) They share a common and important problem: efficient content creation. Be it interactive setups, Mixed Reality experiences, location-based services, all these technologies are worthless without content: content is always to be tackled or delivered at the same time as technology. However, content creation is a long process that can turn to nightmare when implementing large-scale projects. The solution is two words: authoring tool. A powerful, graphical, beyond the state-of-the-art authoring tool is needed that allows one to create elaborated contents in a fast and easy way. No such tool exists to date due to the highly innovative products commercialized by the SMEs. Such a tool will be created by the project. The authoring tool will increase competitiveness, because it significantly shortens production time (50% reduction of integration time) and effort (creation process affordable to non-specialists) for content development. It will also enable other people to create contents for the intended systems: SMEs can then sell more software while subcontracting or licensing the content production. It will also strengthen the European position in an authoring market dominated by US companies. SMEs alone cannot afford such a task, in terms of expertise but also in terms of resources. This project gathers the highly-specialized expertise from ZGDV, TUW and DIST which allows for the delivery of a prototype authoring tool. HadroNet will be the end-user serving the consortium and helping it to gather a larger community of end-users, in order to assess requirements, validate results and construct the basis of a commercial distribution system. \nDoing so, the project will set the first basis of a longer-term collaboration amongst all partners.",
        "start_date": "2005-06-15",
        "end_date": "2007-12-31",
        "leader_id": 190,
        "logo": null,
        "funding_organisations": [
            {
                "id": "EU_IST_6th",
                "contract_number": null,
                "comment": null
            }
        ],
        "research_areas": [
            "VR"
        ],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/EU-U-CREATE",
        "__class": "Project"
    },
    {
        "id": "Treelumination",
        "workgroup_id": "rend",
        "drupal_id": 4267,
        "drupal_path": "/research/projects/Treelumination",
        "name": "TreeLumination",
        "name_de": null,
        "short_title": "Treelumination",
        "website": null,
        "status": "active",
        "short_abstract": "The aim of the project Treelumination is to enhance the realism and visual quality of applications that need to display trees and tree-like plants in a real-time setting.",
        "abstract": "The aim of the Treelumination project is to enhance the realism and visual quality of applications that need to display trees and tree-like plants in a real-time setting. This is important because real-time photo-realistic illumination effects have not been used before for rendering trees in real-time rendering applications, although they are crucial for believable high-quality rendering. The effects that we are going to consider include shadows and self-shadowing, translucency of leaves, global illumination and high-dynamic range rendering.",
        "start_date": "2004-11-01",
        "end_date": "2007-11-30",
        "leader_id": 193,
        "logo": {
            "name": "logo.png",
            "path": "project:Treelumination",
            "type": "image/png",
            "size": 29100,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/Treelumination/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [
            {
                "id": "fwf",
                "contract_number": "P17261-N04",
                "comment": null
            }
        ],
        "research_areas": [
            "Rendering"
        ],
        "publications": [
            "Brandorff_06_PON",
            "CADIK-2006-IAQ",
            "CADIK-2008-EHD",
            "G_P_06_RFS",
            "Grasberger_08_VV",
            "Grasberger_2008_ISR",
            "guerrero-2008-sli",
            "Habel_07_xms",
            "Habel_08_SSH",
            "Habel_09_PGT",
            "Habel_2007_IAG",
            "Habel_2007_RTT",
            "Habel_2009_PhD",
            "Habel_RAV_2010",
            "Habel-09-RAT",
            "Habel-09-THB",
            "Hartl_2006_OgC",
            "Ishmukhametov_Denis_2011_EIN",
            "jeschke-05-AIP",
            "jeschke-05-ISTAR",
            "jeschke-09-praguetalk",
            "JESCHKE-2007-ISC",
            "Kogelnig_Philip-2008-OGC",
            "Luksch_2007_RHR",
            "Mantler-06-landscape",
            "MANTLER-2007-DMBBC",
            "Rudolf_06_IDM",
            "Selig_2005_WRO",
            "Selig-2007-VegAnim",
            "TR-186-2-07-01"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/Treelumination",
        "__class": "Project"
    },
    {
        "id": "GameTools",
        "workgroup_id": "rend",
        "drupal_id": 4269,
        "drupal_path": "/research/projects/GameTools",
        "name": "GameTools",
        "name_de": null,
        "short_title": "GameTools",
        "website": null,
        "status": "active",
        "short_abstract": "The <a href=\"http://www.gametools.org/\" target=\"_blank\">GameTools Project</a> (GTP) researches and implements <i>next generation realtime 3D Libraries for Geometry/Plants, Visibility and Global Illumination</i> under <i>C++/DirectX9</i>. Platforms: <a href=\"http://www.ogre3d.org/\">OGRE</a> for PC and <a href=\"http://www.shark3d.com/\">Shark3D</a> middleware for PC & consoles.",
        "abstract": "Research and creation of next generation <i>Realtime 3D Libraries</i> for Geometry/Plants, Visibility and Global Illumination is the agenda of the <a href=\"http://www.gametools.org/\" target=\"_blank\">EU GameTools Project</a>. The GTP brings together leading computer graphics experts from Austria, France, Hungary and Spain with European industrial partners from the fields of computer game development and virtual reality. The C++/DirectX 9 libraries are being created for the <a href=\"http://www.ogre3d.org/\">OGRE</a> 3D engine, with videogame console support (Xbox, PS2, Xbox 360, PS3) through the commercial <a href=\"http://www.shark3d.com/\">Shark3D</a> middleware engine.\n\nAdditional industrial partners can get preliminary access to the technology by becoming members of the <a href=\"http://www.gametools.org/html/join_the_gtp__.html\">GTP Special Interest Group</a>.",
        "start_date": "2004-10-01",
        "end_date": "2007-06-30",
        "leader_id": 193,
        "logo": {
            "name": "logo.png",
            "path": "project:GameTools",
            "type": "image/png",
            "size": 157170,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/GameTools/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [
            {
                "id": "EU_IST_6th",
                "contract_number": null,
                "comment": null
            }
        ],
        "research_areas": [
            "Geometry",
            "Rendering"
        ],
        "publications": [
            "bittner-2005-egsr",
            "CHARALAMBOS-2007-HLOD",
            "CHIU-2008-PEN",
            "GIEGL-2006-QVS",
            "GIEGL-2007-FVS",
            "GIEGL-2007-QV1",
            "GIEGL-2007-UNP",
            "havran-2005-egsr",
            "havran-2006-tut",
            "jeschke-05-AIP",
            "MATTAUSCH-2006-AVC",
            "mattausch-2007-iav",
            "MATTAUSCH-2007-OSP",
            "TR-186-2-06-02",
            "vis-foa",
            "Wimmer-2005-HOQ",
            "WIMMER-2006-PSM",
            "WIMMER-2006-SIV",
            "WIMMER-2007-GAR",
            "WONKA-2006-GVS"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/GameTools",
        "__class": "Project"
    },
    {
        "id": "MARG",
        "workgroup_id": "rend",
        "drupal_id": 4271,
        "drupal_path": "/research/projects/MARG",
        "name": "Mobile Augmented Reality-Museumsführer",
        "name_de": null,
        "short_title": "MARG",
        "website": null,
        "status": "active",
        "short_abstract": "<p>This project aims at the creation and real-world deployment of a handheld computer guide for museum visitors, based on Augmented Reality.</p>\r\n",
        "abstract": "<p>This project aims at the creation and real-world deployment of a handheld computer guide for museum visitors, based on Augmented Reality.</p>\r\n",
        "start_date": "2005-02-01",
        "end_date": "2007-01-31",
        "leader_id": 190,
        "logo": null,
        "funding_organisations": [
            {
                "id": "fwf",
                "contract_number": null,
                "comment": null
            }
        ],
        "research_areas": [
            "VR"
        ],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/MARG",
        "__class": "Project"
    },
    {
        "id": "Skydome",
        "workgroup_id": "rend",
        "drupal_id": 4273,
        "drupal_path": "/research/projects/Skydome",
        "name": "Skydome",
        "name_de": null,
        "short_title": "Skydome",
        "website": null,
        "status": "active",
        "short_abstract": "<p>The aim of the Skydome Project is the improvement of day skylight models.</p>\r\n",
        "abstract": "<p>The aim of the Skydome Project is the improvement of day skylight models.</p>\r\n",
        "start_date": "2005-01-01",
        "end_date": "2006-12-31",
        "leader_id": 192,
        "logo": {
            "name": "logo.png",
            "path": "project:Skydome",
            "type": "image/png",
            "size": 21323,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/Skydome/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [
            {
                "id": "fwf",
                "contract_number": null,
                "comment": null
            }
        ],
        "research_areas": [],
        "publications": [
            "daubner-2007-HDR",
            "Wilkie-2004-AMS",
            "wilkie-2006-dfs",
            "Zotti-2005-vis",
            "zotti-2006-dgm",
            "zotti-2006-pla",
            "zotti-2007-hdr",
            "zotti-2007-PhD",
            "zotti-2007-wscg"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/Skydome",
        "__class": "Project"
    },
    {
        "id": "realreflect",
        "workgroup_id": "rend",
        "drupal_id": 4275,
        "drupal_path": "/research/projects/realreflect",
        "name": "RealReflect",
        "name_de": null,
        "short_title": "realreflect",
        "website": null,
        "status": "active",
        "short_abstract": "The RealReflect project is an endeavour to increase the realism of Virtual Reality technology to levels where it can be used for meaningful qualitative reviews of virtual prototypes and scenes.",
        "abstract": "The RealReflect project is an endeavour to increase the realism of Virtual Reality technology to levels where it can be used for meaningful qualitative reviews of virtual prototypes and scenes. This has not been possible so far, and would be of considerable benefit to those VR user groups - such as the automotive industry or architecture - who routinely have to take important design decisions about object appearance long before the actual product is first assembled.",
        "start_date": "2002-04-01",
        "end_date": "2005-11-30",
        "leader_id": 222,
        "logo": {
            "name": "logo.png",
            "path": "project:realreflect",
            "type": "image/png",
            "size": 2067,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/realreflect/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [
            {
                "id": "EU_IST_6th",
                "contract_number": null,
                "comment": null
            }
        ],
        "research_areas": [],
        "publications": [
            "Artusi-2003-Del",
            "Devlin-2002-STA",
            "havran-2005-sccg",
            "Neum03CAMR",
            "Neum04GCM",
            "neumann-2004-aip",
            "Neumann-2004-HOB",
            "neumann-2004-ipm",
            "neumann-2006-gamma",
            "Roch2006",
            "zotti-2005-lum"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/realreflect",
        "__class": "Project"
    },
    {
        "id": "EU Virtual Showcases",
        "workgroup_id": "rend",
        "drupal_id": 4276,
        "drupal_path": "/research/projects/EU-Virtual-Showcases",
        "name": "EU Virtual Showcases",
        "name_de": null,
        "short_title": "EU Virtual Showcases",
        "website": null,
        "status": "active",
        "short_abstract": "<p>Showcases belong to the standard equipment of museums and other exhibitions. They are used to display artifacts to the public, to make them available to a larger audience, and to protect them against detrimental effects of the environment. With Virtual Showcases we want to introduce a new medium that allows to present hybrid exhibits.</p>\r\n",
        "abstract": "<p>Showcases belong to the standard equipment of museums and other exhibitions. They are used to display artifacts to the public, to make them available to a larger audience, and to protect them against detrimental effects of the environment. With Virtual Showcases we want to introduce a new medium that allows to present hybrid exhibits.</p>\r\n",
        "start_date": "2001-09-01",
        "end_date": "2005-09-30",
        "leader_id": 190,
        "logo": null,
        "funding_organisations": [],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/EU-Virtual-Showcases",
        "__class": "Project"
    },
    {
        "id": "AngioVis",
        "workgroup_id": "vis",
        "drupal_id": 4278,
        "drupal_path": "/research/projects/AngioVis",
        "name": "AngioVis: Visualization Tools for Peripheral CT-Angiography",
        "name_de": null,
        "short_title": "AngioVis",
        "website": null,
        "status": "active",
        "short_abstract": "post-processing and visualization of large CTA datasets of the peripheral extremities",
        "abstract": "The AngioVis (Angiographic Visualization) project deals with post-processing and visualization of large CTA datasets of the peripheral extremities. The aim of this project is to develop a diagnosis tool for detection and classification of arterial diseases in routine clinical use. The goals cover:\n(1) Developing highly reliable visualization methods. (2) Speeding up post-processing by introducing automatic and semi-automatic tools. (3) Optimising the clinical workflow.\n\n",
        "start_date": "2002-04-01",
        "end_date": "2004-04-30",
        "leader_id": 166,
        "logo": {
            "name": "logo.png",
            "path": "project:AngioVis",
            "type": "image/png",
            "size": 38188,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/AngioVis/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [
            {
                "id": "fwf",
                "contract_number": "P 15217",
                "comment": null
            }
        ],
        "research_areas": [],
        "publications": [
            "Auzinger_Mistelbauer_2013_CSR",
            "bernhard-2006-dvrcta",
            "Cruz-thesis",
            "Eckelt_2017",
            "FISCHL-2012-CTASEG",
            "fmistelbauer-2014-adict",
            "mistelbauer-2012-cr",
            "mistelbauer-2012-ssv",
            "mistelbauer-2013-cfa",
            "TR-186-2-15-03"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/AngioVis",
        "__class": "Project"
    },
    {
        "id": "UrbanViz",
        "workgroup_id": "rend",
        "drupal_id": 4280,
        "drupal_path": "/research/projects/UrbanViz",
        "name": "UrbanViz",
        "name_de": null,
        "short_title": "UrbanViz",
        "website": null,
        "status": "active",
        "short_abstract": "This project aims at the creation and real-time display of large and medium-scale urban environments.",
        "abstract": "The UrbanViz (Urban Visualization - Real-time Rendering of Urban Environments) project aims at the creation of an integrated solution for modeling and real-time visualization of large and medium-scale urban environments. This system can be the basis for applications like traffic and driving simulation, architectural simulations, information visualization and computer games. In the planning process it is useful to simulate the environment before changes in the real city are made. In this context three-dimensional computer simulation gained immense popularity, not only because it produces appealing graphics, but it is a more adequate representation for a three-dimensional environment and easier to understand than conventional 2D plans. Traffic simulation, visual impact analysis and information visualization for urban information systems are applications in the local planning area that would all profit from our framework.",
        "start_date": "1999-11-01",
        "end_date": "2003-10-31",
        "leader_id": 193,
        "logo": {
            "name": "logo.png",
            "path": "project:UrbanViz",
            "type": "image/png",
            "size": 19420,
            "orig_name": "logo.png",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/UrbanViz/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [
            {
                "id": "fwf",
                "contract_number": null,
                "comment": null
            }
        ],
        "research_areas": [],
        "publications": [
            "Bittner-2001-Vis",
            "Bittner-2003-Vis",
            "Bittner-2004-CHC",
            "bittner-2005-egsr",
            "HUMMEL-2001-MSE",
            "Jeschke-2002-LEM",
            "Jeschke-2002-LEMA",
            "Jeschke-2002-TDM",
            "Jeschke-2002-TDMR",
            "LEHNINGER-2007-SSV",
            "PIRINGER-2003-HBOQ",
            "TR-186-2-02-04",
            "Wimmer-1999-FWIa",
            "Wimmer-1999-FWIb",
            "Wimmer-2001-PBI",
            "Wimmer-2001-Poi",
            "Wimmer-2003-RTE",
            "Wimmer-2004-LSPM",
            "wonka-2000-VisP",
            "Wonka-2001-Ins",
            "Wonka-2001-IV",
            "Wonka-2003-Ins"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/projects/UrbanViz",
        "__class": "Project"
    },
    {
        "id": "d9493",
        "workgroup_id": "rend",
        "drupal_id": 9493,
        "drupal_path": "/research/projects/PAVR",
        "name": "Platform for Animation and Virtual Reality",
        "name_de": null,
        "short_title": "PAVR",
        "website": null,
        "status": "active",
        "short_abstract": null,
        "abstract": null,
        "start_date": "1997-01-01",
        "end_date": "2001-10-31",
        "leader_id": 190,
        "logo": {
            "name": "logo.gif",
            "path": "project:d9493",
            "type": "image/gif",
            "size": 11035,
            "orig_name": "logo.gif",
            "thumb_url": "https://www.cg.tuwien.ac.at/research/projects/d9493/logo:thumb{{size}}.png",
            "thumb_image_sizes": [
                120,
                300
            ]
        },
        "funding_organisations": [],
        "research_areas": [],
        "publications": [],
        "url": "https://www.cg.tuwien.ac.at/research/projects/PAVR",
        "__class": "Project"
    }
]
