Publication List

2024

  • C. Popp and D. T. Murphy, “Speech intelligibility versus congruency: user preferences of the acoustics of virtual reality game spaces,” Virtual Worlds, vol. 3, iss. 1, p. 40–61, 2024. doi:10.3390/virtualworlds3010003
    [BibTeX] [Abstract] [Download PDF]

    3D audio spatializers for Virtual Reality (VR) can use the acoustic properties of the surfaces of a visualised game space to calculate a matching reverb. However, this approach could lead to reverbs that impair the tasks performed in such a space, such as listening to speech-based audio. Sound designers would then have to alter the room’s acoustic properties independently of its visualisation to improve speech intelligibility, causing audio-visual incongruency. As user expectation of simulated room acoustics regarding speech intelligibility in VR has not been studied, this study asked participants to rate the congruency of reverbs and their visualisations in 6-DoF VR while listening to speech-based audio. The participants compared unaltered, matching reverbs with sound-designed, mismatching reverbs. The latter feature improved D50s and reduced RT60s at the cost of lower audio-visual congruency. Results suggest participants preferred improved reverbs only when the unaltered reverbs had comparatively low D50s or excessive ringing. Otherwise, too dry or too reverberant reverbs were disliked. The range of expected RT60s depended on the surface visualisation. Differences in timbre between the reverbs may not affect preferences as strongly as shorter RT60s. Therefore, sound designers can intervene and prioritise speech intelligibility over audio-visual congruency in acoustically challenging game spaces.

    @article{virtualworlds3010003,
      author   = {Popp, Constantin and Murphy, Damian T.},
      title    = {Speech Intelligibility versus Congruency: User Preferences of the Acoustics of Virtual Reality Game Spaces},
      journal  = {Virtual Worlds},
      year     = {2024},
      volume   = {3},
      number   = {1},
      pages    = {40--61},
      issn     = {2813-2084},
      doi      = {10.3390/virtualworlds3010003},
      url      = {https://www.mdpi.com/2813-2084/3/1/3},
      abstract = {3D audio spatializers for Virtual Reality (VR) can use the acoustic properties of the surfaces of a visualised game space to calculate a matching reverb. However, this approach could lead to reverbs that impair the tasks performed in such a space, such as listening to speech-based audio. Sound designers would then have to alter the room’s acoustic properties independently of its visualisation to improve speech intelligibility, causing audio-visual incongruency. As user expectation of simulated room acoustics regarding speech intelligibility in VR has not been studied, this study asked participants to rate the congruency of reverbs and their visualisations in 6-DoF VR while listening to speech-based audio. The participants compared unaltered, matching reverbs with sound-designed, mismatching reverbs. The latter feature improved D50s and reduced RT60s at the cost of lower audio-visual congruency. Results suggest participants preferred improved reverbs only when the unaltered reverbs had comparatively low D50s or excessive ringing. Otherwise, too dry or too reverberant reverbs were disliked. The range of expected RT60s depended on the surface visualisation. Differences in timbre between the reverbs may not affect preferences as strongly as shorter RT60s. Therefore, sound designers can intervene and prioritise speech intelligibility over audio-visual congruency in acoustically challenging game spaces.},
    }

2023

  • G. Acosta Martínez and H. Daffern, “Complexity of vocal vibrato in opera and jazz recordings: insights from entropy and recurrence analyses,” Journal of Voice, 2023. doi:10.1016/j.jvoice.2023.11.020
    [BibTeX]
    @article{acosta2023ainpress,
      author    = {Acosta Martínez, Gerardo and Daffern, Helena},
      title     = {Complexity of vocal vibrato in opera and jazz recordings: Insights from entropy and recurrence analyses},
      journal   = {Journal of Voice},
      year      = {2023},
      publisher = {Elsevier},
      doi       = {10.1016/j.jvoice.2023.11.020},
    }

  • D. Geary, J. Francombe, K. Hentschel, and D. Murphy, “Using design dimensions to develop a multi-device audio experience through workshops and prototyping,” in Proceedings of the 18th international audio mostly conference, New York, NY, USA, 2023, p. 71–78. doi:10.1145/3616195.3616199
    [BibTeX] [Abstract] [Download PDF]

    Designing audio experiences for heterogeneous arrays of multiple devices is challenging, and researchers have tried to identify useful design practices. A set of design dimensions have been proposed, providing researchers and creative practitioners with a framework for understanding the different design considerations for multi-device audio; however, they have yet to be used for scoping and developing a new experience. This work investigates the utility of the design dimensions for exploring and prototyping new multi-device audio experiences. Three workshops were conducted with audio professionals to see how the design dimensions could be used to form new ideas. Using the resulting ideas, a multi-device audio system combining loudspeakers and earbuds, and an experience based on that system, were created and demonstrated. The design dimensions were found to be useful for understanding multi-device audio experiences and for quickly forming new ideas. In addition, the dimensions were a helpful reference during experience development for testing different design choices, particularly for audio allocation.

    @inproceedings{10.1145/3616195.3616199,
      author    = {Geary, David and Francombe, Jon and Hentschel, Kristian and Murphy, Damian},
      title     = {Using Design Dimensions to Develop a Multi-Device Audio Experience through Workshops and Prototyping},
      booktitle = {Proceedings of the 18th International Audio Mostly Conference},
      series    = {AM '23},
      year      = {2023},
      pages     = {71--78},
      publisher = {Association for Computing Machinery},
      address   = {New York, NY, USA},
      location  = {Edinburgh, United Kingdom},
      numpages  = {8},
      isbn      = {9798400708183},
      doi       = {10.1145/3616195.3616199},
      url       = {https://doi.org/10.1145/3616195.3616199},
      keywords  = {multi-device, experience, hearables, audio, workshops, idea generation, prototyping, design},
      abstract  = {Designing audio experiences for heterogeneous arrays of multiple devices is challenging, and researchers have tried to identify useful design practices. A set of design dimensions have been proposed, providing researchers and creative practitioners with a framework for understanding the different design considerations for multi-device audio; however, they have yet to be used for scoping and developing a new experience. This work investigates the utility of the design dimensions for exploring and prototyping new multi-device audio experiences. Three workshops were conducted with audio professionals to see how the design dimensions could be used to form new ideas. Using the resulting ideas, a multi-device audio system combining loudspeakers and earbuds, and an experience based on that system, were created and demonstrated. The design dimensions were found to be useful for understanding multi-device audio experiences and for quickly forming new ideas. In addition, the dimensions were a helpful reference during experience development for testing different design choices, particularly for audio allocation.},
    }

  • J. Harrison, A. W. Archer-Boyd, J. Francombe, C. Pike, and D. T. Murphy, “The relationship between environmental context and attentional engagement in podcast listening experiences,” Frontiers in Psychology, vol. 13, 2023. doi:10.3389/fpsyg.2022.1074320
    [BibTeX] [Abstract] [Download PDF]

    Introduction: Previous research has shown that podcasts are most frequently consumed using mobile listening devices across a wide variety of environmental, situational, and social contexts. To date, no studies have investigated how an individual’s environmental context might influence their attentional engagement in podcast listening experiences. Improving understanding of the contexts in which episodes of listening take place, and how they might affect listener engagement, could be highly valuable to researchers and producers working in the fields of object-based and personalized media. Methods: An online questionnaire on listening habits and behaviors was distributed to a sample of 264 podcast listeners. An exploratory factor analysis was run to identify factors of environmental context that influence attentional engagement in podcast listening experiences. Five aspects of podcast listening engagement were also defined and measured across the sample. Results: The exploratory factor analysis revealed five factors of environmental context labeled as: outdoors, indoors & at home, evenings, soundscape & at work, and exercise. The aspects of podcast listening engagement provided a comprehensive quantitative account of contemporary podcast listening experiences. Discussion: The results presented support the hypothesis that elements of a listener’s environmental context can influence their attentional engagement in podcast listening experiences. The soundscape & at work factor suggests that some listeners actively choose to consume podcasts to mask disturbing stimuli in their surrounding soundscape. Further analysis suggested that the proposed factors of environmental context were positively correlated with the measured aspects of podcast listening engagement. 
The results are highly pertinent to the fields of podcast studies, mobile listening experiences, and personalized media, and provide a basis for researchers seeking to explore how other forms of listening context might influence attentional engagement.

    @article{10.3389/fpsyg.2022.1074320,
      author   = {Harrison, Jay and Archer-Boyd, Alan W. and Francombe, Jon and Pike, Chris and Murphy, Damian T.},
      title    = {The relationship between environmental context and attentional engagement in podcast listening experiences},
      journal  = {Frontiers in Psychology},
      year     = {2023},
      volume   = {13},
      issn     = {1664-1078},
      doi      = {10.3389/fpsyg.2022.1074320},
      url      = {https://www.frontiersin.org/articles/10.3389/fpsyg.2022.1074320},
      abstract = {Introduction: Previous research has shown that podcasts are most frequently consumed using mobile listening devices across a wide variety of environmental, situational, and social contexts. To date, no studies have investigated how an individual's environmental context might influence their attentional engagement in podcast listening experiences. Improving understanding of the contexts in which episodes of listening take place, and how they might affect listener engagement, could be highly valuable to researchers and producers working in the fields of object-based and personalized media. Methods: An online questionnaire on listening habits and behaviors was distributed to a sample of 264 podcast listeners. An exploratory factor analysis was run to identify factors of environmental context that influence attentional engagement in podcast listening experiences. Five aspects of podcast listening engagement were also defined and measured across the sample. Results: The exploratory factor analysis revealed five factors of environmental context labeled as: outdoors, indoors & at home, evenings, soundscape & at work, and exercise. The aspects of podcast listening engagement provided a comprehensive quantitative account of contemporary podcast listening experiences. Discussion: The results presented support the hypothesis that elements of a listener's environmental context can influence their attentional engagement in podcast listening experiences. The soundscape & at work factor suggests that some listeners actively choose to consume podcasts to mask disturbing stimuli in their surrounding soundscape. Further analysis suggested that the proposed factors of environmental context were positively correlated with the measured aspects of podcast listening engagement. The results are highly pertinent to the fields of podcast studies, mobile listening experiences, and personalized media, and provide a basis for researchers seeking to explore how other forms of listening context might influence attentional engagement.},
    }

  • A. Foteinou, D. Murphy, and J. P. D. Cooper, “An acoustic reconstruction of the House of Commons, c. 1820–1834,” Acoustics, vol. 5, iss. 1, p. 193–215, 2023. doi:10.3390/acoustics5010012
    [BibTeX] [Abstract] [Download PDF]

    This paper presents an acoustic reconstruction of the UK House of Commons between c. 1820 and 1834. Focusing on a historically important site where political decisions were debated over the centuries, we aim to simulate and present the intangible principles of the acoustic properties and sounds heard within the space. The acoustic model was created based on available historical evidence with the aid of commercial acoustic simulation software. We discuss the decisions made for this reconstruction based on further experimentation with the acoustic characteristics of the constituent materials and settings of the available software. An additional comparison of the achieved acoustic results with spaces of similar historical importance and layout is presented, as a calibration of the model with in situ measurements was not possible in this case study. The values of T30, EDT, C50 and Ts are presented, while auralization examples are also available for a subjective evaluation of the results.

    @article{acoustics5010012,
      author   = {Foteinou, Aglaia and Murphy, Damian and Cooper, J. P. D.},
      title    = {An Acoustic Reconstruction of the {House of Commons}, c. 1820--1834},
      journal  = {Acoustics},
      year     = {2023},
      volume   = {5},
      number   = {1},
      pages    = {193--215},
      issn     = {2624-599X},
      doi      = {10.3390/acoustics5010012},
      url      = {https://www.mdpi.com/2624-599X/5/1/12},
      abstract = {This paper presents an acoustic reconstruction of the UK House of Commons between c. 1820 and 1834. Focusing on a historically important site where political decisions were debated over the centuries, we aim to simulate and present the intangible principles of the acoustic properties and sounds heard within the space. The acoustic model was created based on available historical evidence with the aid of commercial acoustic simulation software. We discuss the decisions made for this reconstruction based on further experimentation with the acoustic characteristics of the constituent materials and settings of the available software. An additional comparison of the achieved acoustic results with spaces of similar historical importance and layout is presented, as a calibration of the model with in situ measurements was not possible in this case study. The values of T30, EDT, C50 and Ts are presented, while auralization examples are also available for a subjective evaluation of the results.},
    }

  • J. Williams, J. Francombe, and D. Murphy, “Evaluating the influence of room illumination on camera-based physiological measurements for the assessment of screen-based media,” Applied Sciences, vol. 13, iss. 14, 2023. doi:10.3390/app13148482
    [BibTeX] [Abstract] [Download PDF]

    Camera-based solutions can be a convenient means of collecting physiological measurements indicative of psychological responses to stimuli. However, the low illumination playback conditions commonly associated with viewing screen-based media oppose the bright conditions recommended for accurately recording physiological data with a camera. A study was designed to determine the feasibility of obtaining physiological data, for psychological insight, in illumination conditions representative of real world viewing experiences. In this study, a novel method was applied for testing a first-of-its-kind system for measuring both heart rate and facial actions from video footage recorded with a single discretely placed camera. Results suggest that conditions representative of a bright domestic setting should be maintained when using this technology, despite this being considered a sub-optimal playback condition. Further analyses highlight that even within this bright condition, both the camera-measured facial action and heart rate data contained characteristic errors. In future research, the influence of these performance issues on psychological insights may be mitigated by reducing the temporal resolution of the heart rate measurements and ignoring fast and low-intensity facial movements.

    @article{app13148482,
      author         = {Williams, Joseph and Francombe, Jon and Murphy, Damian},
      title          = {Evaluating the Influence of Room Illumination on Camera-Based Physiological Measurements for the Assessment of Screen-Based Media},
      journal        = {Applied Sciences},
      year           = {2023},
      volume         = {13},
      number         = {14},
      article-number = {8482},
      issn           = {2076-3417},
      doi            = {10.3390/app13148482},
      url            = {https://www.mdpi.com/2076-3417/13/14/8482},
      abstract       = {Camera-based solutions can be a convenient means of collecting physiological measurements indicative of psychological responses to stimuli. However, the low illumination playback conditions commonly associated with viewing screen-based media oppose the bright conditions recommended for accurately recording physiological data with a camera. A study was designed to determine the feasibility of obtaining physiological data, for psychological insight, in illumination conditions representative of real world viewing experiences. In this study, a novel method was applied for testing a first-of-its-kind system for measuring both heart rate and facial actions from video footage recorded with a single discretely placed camera. Results suggest that conditions representative of a bright domestic setting should be maintained when using this technology, despite this being considered a sub-optimal playback condition. Further analyses highlight that even within this bright condition, both the camera-measured facial action and heart rate data contained characteristic errors. In future research, the influence of these performance issues on psychological insights may be mitigated by reducing the temporal resolution of the heart rate measurements and ignoring fast and low-intensity facial movements.},
    }

  • P. Cairns, H. Daffern, and G. Kearney, “A DAW-based approach to immersive audio system evaluation in Network Music Performance contexts,” in Audio Engineering Society Conference: AES 2023 International Conference on Spatial and Immersive Audio, 2023.
    [BibTeX] [Download PDF]
    @inproceedings{cairns2023daw,
      author       = {Cairns, Patrick and Daffern, Helena and Kearney, Gavin},
      title        = {A {DAW}-based approach to immersive audio system evaluation in {Network Music Performance} contexts},
      booktitle    = {Audio Engineering Society Conference: AES 2023 International Conference on Spatial and Immersive Audio},
      year         = {2023},
      organization = {Audio Engineering Society},
      url          = {https://www.aes.org/e-lib/browse.cfm?elib=22186},
    }

  • P. Cairns, A. Hunt, D. Johnston, J. Cooper, B. Lee, H. Daffern, and G. Kearney, “Evaluation of metaverse music performance with BBC Maida Vale recording studios,” J. Audio Eng. Soc, vol. 71, iss. 6, p. 313–325, 2023.
    [BibTeX] [Download PDF]
    @article{cairns2023evaluation,
      author  = {Cairns, Patrick and Hunt, Anthony and Johnston, Daniel and Cooper, Jacob and Lee, Ben and Daffern, Helena and Kearney, Gavin},
      title   = {Evaluation of Metaverse Music Performance With {BBC} {Maida Vale} Recording Studios},
      journal = {Journal of the Audio Engineering Society},
      year    = {2023},
      volume  = {71},
      number  = {6},
      pages   = {313--325},
      url     = {http://www.aes.org/e-lib/browse.cfm?elib=22139},
    }

  • J. Cooper, H. Daffern, and G. Kearney, “Perceptual thresholds for angular decomposition of direct and reverberant sound in spatial room impulse responses,” in Audio engineering society conference: aes 2023 international conference on spatial and immersive audio, 2023.
    [BibTeX] [Download PDF]
    @inproceedings{Cooper2023,
      author    = {Cooper, Jacob and Daffern, Helena and Kearney, Gavin},
      title     = {Perceptual thresholds for angular decomposition of direct and reverberant sound in {Spatial Room Impulse Responses}},
      booktitle = {Audio Engineering Society Conference: AES 2023 International Conference on Spatial and Immersive Audio},
      month     = aug,
      year      = {2023},
      url       = {http://www.aes.org/e-lib/browse.cfm?elib=22180},
    }

  • A. Hunt, H. Daffern, and G. Kearney, “Avatar representation in Extended Reality for Immersive Networked Music Performance,” in Audio Engineering Society Conference: AES 2023 International Conference on Spatial and Immersive Audio, 2023.
    [BibTeX] [Download PDF]
    @inproceedings{hunt2023avatar,
      author       = {Hunt, Anthony and Daffern, Helena and Kearney, Gavin},
      title        = {Avatar representation in {Extended Reality} for {Immersive Networked Music Performance}},
      booktitle    = {Audio Engineering Society Conference: AES 2023 International Conference on Spatial and Immersive Audio},
      year         = {2023},
      organization = {Audio Engineering Society},
      url          = {https://www.aes.org/e-lib/browse.cfm?elib=22183},
    }

  • J. Paterson and O. Kadel, “Audio for extended realities: A case study informed exposition,” Convergence: The International Journal of Research into New Media Technologies, vol. 29, iss. 3, pp. 555-798, 2023. doi:10.1177/13548565231169723
    [BibTeX]
    @article{Kadel2023,
      author  = {Paterson, J. and Kadel, O.},
      title   = {Audio for extended realities: A case study informed exposition},
      journal = {Convergence: The International Journal of Research into New Media Technologies},
      year    = {2023},
      volume  = {29},
      number  = {3},
      pages   = {555--798},
      doi     = {10.1177/13548565231169723},
    }

  • B. Lee, T. Rudzki, J. Skoglund, and G. Kearney, “Context-Based Evaluation of the Opus Audio Codec for Spatial Audio Content in Virtual Reality,” Journal of the Audio Engineering Society, vol. 71, iss. 4, p. 145–154, 2023.
    [BibTeX] [Download PDF]
    @article{Lee2023,
      author  = {Lee, Ben and Rudzki, Tomasz and Skoglund, Jan and Kearney, Gavin},
      title   = {Context-Based Evaluation of the {Opus} Audio Codec for Spatial Audio Content in {Virtual Reality}},
      journal = {Journal of the Audio Engineering Society},
      year    = {2023},
      volume  = {71},
      number  = {4},
      pages   = {145--154},
      url     = {https://www.aes.org/e-lib/browse.cfm?elib=22037},
    }

  • H. Mi, G. Kearney, and H. Daffern, “Perceptual Similarities between Artificial Reverberation Algorithms and Real Reverberation,” Applied Sciences, vol. 13, iss. 2, p. 840, 2023.
    [BibTeX] [Download PDF]
    @article{mi2023perceptual,
      author    = {Mi, Huan and Kearney, Gavin and Daffern, Helena},
      title     = {Perceptual Similarities between Artificial Reverberation Algorithms and Real Reverberation},
      journal   = {Applied Sciences},
      year      = {2023},
      volume    = {13},
      number    = {2},
      pages     = {840},
      publisher = {MDPI},
      url       = {https://www.mdpi.com/2076-3417/13/2/840},
    }

  • G. Acosta and H. Daffern, “A pilot study of vocal vibrato incorporating nonlinear time series analysis,” in Proceedings of the 5th stockholm music acoustic conference, 2023.
    [BibTeX]
    @inproceedings{p2,
      author    = {Acosta, Gerardo and Daffern, Helena},
      title     = {A pilot study of vocal vibrato incorporating nonlinear time series analysis},
      booktitle = {Proceedings of the 5th Stockholm Music Acoustic Conference},
      year      = {2023},
    }

  • S. Parkinson, S. Schumann, A. Taylor, C. Fenton, G. Kearney, M. Garside, and D. Johnston, “SoundFields: A Virtual Reality Home-Based Intervention for Auditory Hypersensitivity Experienced by Autistic Children,” Applied Sciences, vol. 13, iss. 11, p. 6783, 2023.
    [BibTeX] [Download PDF]
    @article{parkinson2023soundfields,
      author    = {Parkinson, Sarah and Schumann, Sophie and Taylor, Amelia and Fenton, Clare and Kearney, Gavin and Garside, Megan and Johnston, Daniel},
      title     = {{SoundFields}: A {Virtual Reality} Home-Based Intervention for Auditory Hypersensitivity Experienced by Autistic Children},
      journal   = {Applied Sciences},
      year      = {2023},
      volume    = {13},
      number    = {11},
      pages     = {6783},
      publisher = {MDPI},
      url       = {https://www.mdpi.com/2076-3417/13/11/6783},
    }

  • S. Durbridge and D. T. Murphy, “Assessment of soundscapes using self-report and physiological measures,” Acta acust., vol. 7, p. 6, 2023. doi:10.1051/aacus/2022059
    [BibTeX] [Download PDF]
    @article{refId0,
      author  = {Durbridge, Simon and Murphy, Damian Thomas},
      title   = {Assessment of soundscapes using self-report and physiological measures},
      journal = {Acta Acustica},
      year    = {2023},
      volume  = {7},
      pages   = {6},
      doi     = {10.1051/aacus/2022059},
    }

  • T. Rudzki, D. Murphy, and G. Kearney, “User Preference Evaluation of Direct-to-Reverberant Ratio of Virtual Ambisonic Listening Spaces,” in Audio Engineering Society Conference: AES 2023 International Conference on Spatial and Immersive Audio, 2023.
    [BibTeX] [Download PDF]
    @inproceedings{rudzki2023user,
      author       = {Rudzki, Tomasz and Murphy, Damian and Kearney, Gavin},
      title        = {User Preference Evaluation of Direct-to-Reverberant Ratio of Virtual {Ambisonic} Listening Spaces},
      booktitle    = {Audio Engineering Society Conference: AES 2023 International Conference on Spatial and Immersive Audio},
      year         = {2023},
      organization = {Audio Engineering Society},
      url          = {https://www.aes.org/e-lib/browse.cfm?elib=22171},
    }

  • J. Williams, J. Francombe, and D. Murphy, “Exploring the influence of multichannel soundtracks on film immersion,” in Audio engineering society conference: aes 2023 international conference on spatial and immersive audio, 2023.
    [BibTeX] [Download PDF]
    @inproceedings{williams2023exploring,
      author    = {Williams, Joseph and Francombe, Jon and Murphy, Damian},
      title     = {Exploring the influence of multichannel soundtracks on film immersion},
      booktitle = {Audio Engineering Society Conference: AES 2023 International Conference on Spatial and Immersive Audio},
      month     = aug,
      year      = {2023},
      url       = {http://www.aes.org/e-lib/browse.cfm?elib=22214},
    }

  • K. Young and G. Kearney, “A High-Resolution Boundary Element Method Suitable Full Torso Mesh of KEMAR,” Journal of the Audio Engineering Society, vol. 71, iss. 7/8, p. 492–501, 2023.
    [BibTeX] [Download PDF]
    @article{young2023high,
      author    = {Young, Kat and Kearney, Gavin},
      title     = {A High-Resolution {Boundary Element Method} Suitable Full Torso Mesh of {KEMAR}},
      journal   = {Journal of the Audio Engineering Society},
      year      = {2023},
      volume    = {71},
      number    = {7/8},
      pages     = {492--501},
      publisher = {Audio Engineering Society},
      url       = {https://www.aes.org/e-lib/browse.cfm?elib=22154},
    }

2022

  • G. Acosta and H. Daffern, “Vibrato dynamics in group singing: understanding vibrato behaviour through dynamical systems theory,” in Proceedings of the 14th pan-european voice conference, 2022.
    [BibTeX]
    @inproceedings{a1,
      author    = {Acosta, Gerardo and Daffern, Helena},
      title     = {Vibrato dynamics in group singing: Understanding vibrato behaviour through dynamical systems theory},
      booktitle = {Proceedings of the 14th Pan-European Voice Conference},
      year      = {2022},
    }

  • G. Kearney, H. Daffern, P. Cairns, A. Hunt, B. Lee, J. Cooper, P. Tsagkarakis, T. Rudzki, and D. Johnston, “Measuring the acoustical properties of the BBC Maida Vale recording studios for virtual reality,” Acoustics, vol. 4, iss. 3, p. 783–799, 2022. doi:10.3390/acoustics4030047
    [BibTeX] [Download PDF]
    @article{acoustics4030047,
      author  = {Kearney, Gavin and Daffern, Helena and Cairns, Patrick and Hunt, Anthony and Lee, Ben and Cooper, Jacob and Tsagkarakis, Panos and Rudzki, Tomasz and Johnston, Daniel},
      title   = {Measuring the Acoustical Properties of the {BBC} {Maida Vale} Recording Studios for {Virtual Reality}},
      journal = {Acoustics},
      year    = {2022},
      volume  = {4},
      number  = {3},
      pages   = {783--799},
      issn    = {2624-599X},
      doi     = {10.3390/acoustics4030047},
      url     = {https://www.mdpi.com/2624-599X/4/3/47},
    }

  • D. Geary, J. Francombe, K. Hentschel, and D. Murphy, “Design dimensions of co-located multi-device audio experiences,” Applied sciences, vol. 12, iss. 15, 2022. doi:10.3390/app12157512
    [BibTeX] [Abstract] [Download PDF]

    The widespread distribution of mobile computing presents new opportunities for the consumption of interactive and immersive media experiences using multiple connected devices. Tools now exist for the creation of these experiences; however, there is still limited understanding of the best design practices and use cases for the technology, especially in the context of audio experiences. In this study, the application space of co-located multi-device audio experiences is explored and documented through a review of the literature and a survey. Using the obtained information, a set of seven design dimensions that can be used to characterise and compare experiences of this type is proposed; these are synchronisation, context, position, relationship, interactivity, organisation, and distribution. A mapping of the current application space is presented where four categories are identified using the design dimensions, these are public performances, interactive music, augmented broadcasting, and social games. Finally, the overlap between co-located multi-device audio and audio-augmented reality (AAR) experiences is highlighted and discussed. This work will contribute to the wider discussion about the role of multiple devices in audio experiences and provide a source of reference for the design of future multi-device audio experiences.

    @article{app12157512,
      author         = {Geary, David and Francombe, Jon and Hentschel, Kristian and Murphy, Damian},
      title          = {Design Dimensions of Co-Located Multi-Device Audio Experiences},
      journal        = {Applied Sciences},
      year           = {2022},
      volume         = {12},
      number         = {15},
      article-number = {7512},
      issn           = {2076-3417},
      doi            = {10.3390/app12157512},
      url            = {https://www.mdpi.com/2076-3417/12/15/7512},
      abstract       = {The widespread distribution of mobile computing presents new opportunities for the consumption of interactive and immersive media experiences using multiple connected devices. Tools now exist for the creation of these experiences; however, there is still limited understanding of the best design practices and use cases for the technology, especially in the context of audio experiences. In this study, the application space of co-located multi-device audio experiences is explored and documented through a review of the literature and a survey. Using the obtained information, a set of seven design dimensions that can be used to characterise and compare experiences of this type is proposed; these are synchronisation, context, position, relationship, interactivity, organisation, and distribution. A mapping of the current application space is presented where four categories are identified using the design dimensions, these are public performances, interactive music, augmented broadcasting, and social games. Finally, the overlap between co-located multi-device audio and audio-augmented reality (AAR) experiences is highlighted and discussed. This work will contribute to the wider discussion about the role of multiple devices in audio experiences and provide a source of reference for the design of future multi-device audio experiences.},
    }

  • P. Cairns, A. Hunt, J. Cooper, D. Johnston, B. Lee, H. Daffern, and G. Kearney, “Recording music in the metaverse: a case study of XR BBC Maida Vale recording studios,” in Audio engineering society conference: aes 2022 international audio for virtual and augmented reality conference, 2022.
    [BibTeX]
    @inproceedings{Cairns2022,
      author       = {Cairns, Patrick and Hunt, Anthony and Cooper, Jacob and Johnston, Daniel and Lee, Ben and Daffern, Helena and Kearney, Gavin},
      title        = {Recording Music in the Metaverse: A case study of XR BBC Maida Vale Recording Studios},
      booktitle    = {Audio Engineering Society Conference: AES 2022 International Audio for Virtual and Augmented Reality Conference},
      year         = {2022},
      organization = {Audio Engineering Society},
    }

  • P. Cairns, A. Hunt, J. Cooper, D. Johnston, B. Lee, H. Daffern, and G. Kearney, “Recording music in the metaverse: a case study of xr bbc maida vale recording studios,” in Audio engineering society conference: aes 2022 international audio for virtual and augmented reality conference, 2022.
    [BibTeX] [Download PDF]
    @inproceedings{cairns2022recording,
      author        = {Cairns, Patrick and Hunt, Anthony and Cooper, Jacob and Johnston, Daniel and Lee, Ben and Daffern, Helena and Kearney, Gavin},
      title         = {Recording Music in the Metaverse: A case study of XR BBC Maida Vale Recording Studios},
      booktitle     = {Audio Engineering Society Conference: AES 2022 International Audio for Virtual and Augmented Reality Conference},
      month         = aug,
      year          = {2022},
      url           = {http://www.aes.org/e-lib/browse.cfm?elib=21842},
      internal-note = {NOTE(review): apparent duplicate of entry Cairns2022 (same work, same venue) -- consider merging under one key},
    }

  • S. Durbridge and D. Murphy, “Soundscape evaluation in the virtual reality: tools for the creation of soundscape studies,” in Audio engineering society conference: aes 2022 international audio for virtual and augmented reality conference, 2022.
    [BibTeX]
    @inproceedings{durbridge2022soundscape,
      author       = {Durbridge, Simon and Murphy, Damian},
      title        = {Soundscape Evaluation In The Virtual Reality: Tools for the creation of soundscape studies},
      booktitle    = {Audio Engineering Society Conference: AES 2022 International Audio for Virtual and Augmented Reality Conference},
      year         = {2022},
      organization = {Audio Engineering Society},
    }

  • D. Geary, J. Francombe, K. Hentschel, and D. Murphy, “A survey of co-located multi-device audio experiences,” in Audio engineering society conference: aes 2022 international audio for virtual and augmented reality conference, 2022.
    [BibTeX]
    @inproceedings{geary2022survey,
      author       = {Geary, David and Francombe, Jon and Hentschel, Kristian and Murphy, Damian},
      title        = {A survey of co-located multi-device audio experiences},
      booktitle    = {Audio Engineering Society Conference: AES 2022 International Audio for Virtual and Augmented Reality Conference},
      year         = {2022},
      organization = {Audio Engineering Society},
    }

  • J. Gregg, G. Kearney, and L. Ward, “Using enhanced audio to create an accessible mobile augmented reality app for visually impaired users,” in Audio engineering society conference: aes 2022 international audio for virtual and augmented reality conference, 2022.
    [BibTeX]
    @inproceedings{gregg2022using,
      author       = {Gregg, Joshua and Kearney, Gavin and Ward, Lauren},
      title        = {Using enhanced audio to create an accessible mobile augmented reality app for visually impaired users},
      booktitle    = {Audio Engineering Society Conference: AES 2022 International Audio for Virtual and Augmented Reality Conference},
      year         = {2022},
      organization = {Audio Engineering Society},
    }

  • D. Turner, D. Murphy, C. Pike, and C. Baume, “Spatial audio production for immersive media experiences: perspectives on practice-led approaches to designing immersive audio content,” The soundtrack, vol. 13, iss. 1, pp. 73–94, 2022. doi:10.1386/ts_00017_1
    [BibTeX] [Abstract] [Download PDF]

    Sound design with the goal of immersion is not new. However, sound design for immersive media experiences (IMEs) utilizing spatial audio can still be considered a relatively new area of practice with less well-defined methods requiring a new and still emerging set of skills and tools. There is, at present, a lack of formal literature around the challenges introduced by this relatively new content form and the tools used to create it, and how these may differ from audio production for traditional media. This article, through the use of semi-structured interviews and an online questionnaire, looks to explore what audio practitioners view as defining features of IMEs, the challenges in creating audio content for IMEs and how current practices for traditional stereo productions are being adapted for use within 360 interactive soundfields. It also highlights potential direction for future research and technological development and the importance of practitioner involvement in research and development in ensuring future tools and technologies satisfy the current needs.

    @article{intel:/content/journals/10.1386/ts_00017_1,
      author    = {Turner, Daniel and Murphy, Damian and Pike, Chris and Baume, Chris},
      title     = {Spatial audio production for immersive media experiences: Perspectives on practice-led approaches to designing immersive audio content},
      journal   = {The Soundtrack},
      year      = {2022},
      volume    = {13},
      number    = {1},
      pages     = {73--94},
      doi       = {10.1386/ts_00017_1},
      url       = {https://intellectdiscover.com/content/journals/10.1386/ts_00017_1},
      publisher = {Intellect},
      issn      = {1751-4207},
      keywords  = {sound design, extended reality, immersive audio, spatial audio, immersive media, audio production},
      abstract  = {Sound design with the goal of immersion is not new. However, sound design for immersive media experiences (IMEs) utilizing spatial audio can still be considered a relatively new area of practice with less well-defined methods requiring a new and still emerging set of skills and tools. There is, at present, a lack of formal literature around the challenges introduced by this relatively new content form and the tools used to create it, and how these may differ from audio production for traditional media. This article, through the use of semi-structured interviews and an online questionnaire, looks to explore what audio practitioners view as defining features of IMEs, the challenges in creating audio content for IMEs and how current practices for traditional stereo productions are being adapted for use within 360 interactive soundfields. It also highlights potential direction for future research and technological development and the importance of practitioner involvement in research and development in ensuring future tools and technologies satisfy the current needs.},
    }

  • D. Johnston, H. Egermann, and G. Kearney, “The Use of Binaural Based Spatial Audio in the Reduction of Auditory Hypersensitivity in Autistic Young People,” International Journal of Environmental Research and Public Health, vol. 19, iss. 19, p. 12474, 2022.
    [BibTeX] [Download PDF]
    @article{johnston2022use,
      author    = {Johnston, Daniel and Egermann, Hauke and Kearney, Gavin},
      title     = {The Use of Binaural Based Spatial Audio in the Reduction of Auditory Hypersensitivity in Autistic Young People},
      journal   = {International Journal of Environmental Research and Public Health},
      volume    = {19},
      number    = {19},
      pages     = {12474},
      year      = {2022},
      publisher = {MDPI},
      url       = {https://www.mdpi.com/1660-4601/19/19/12474},
    }

  • B. F. Katz, D. Murphy, and A. Farina, “Phe: the past has ears project overview,” in 2nd symposium: the acoustics of ancient theatres, 2022, p. 1–4.
    [BibTeX]
    @inproceedings{katz2022phe,
      author    = {Katz, Brian F. G. and Murphy, Damian and Farina, Angelo},
      title     = {{PHE}: The Past Has Ears project overview},
      booktitle = {2nd Symposium: The Acoustics of Ancient Theatres},
      pages     = {1--4},
      year      = {2022},
    }

  • M. Lopez, G. Kearney, and K. Hofstädter, “Seeing films through sound: sound design, spatial audio, and accessibility for visually impaired audiences,” British journal of visual impairment, vol. 40, iss. 2, p. 117–144, 2022.
    [BibTeX]
    @article{lopez2022seeing,
      author    = {Lopez, Mariana and Kearney, Gavin and Hofst{\"a}dter, Kriszti{\'a}n},
      title     = {Seeing films through sound: Sound design, spatial audio, and accessibility for visually impaired audiences},
      journal   = {British Journal of Visual Impairment},
      volume    = {40},
      number    = {2},
      pages     = {117--144},
      year      = {2022},
      publisher = {SAGE Publications},
    }

  • T. McKenzie, C. Armstrong, L. Ward, D. T. Murphy, and G. Kearney, “Predicting the colouration between binaural signals,” Applied sciences, vol. 12, iss. 5, p. 2441, 2022.
    [BibTeX]
    @article{mckenzie2022predicting,
      author    = {McKenzie, Thomas and Armstrong, Cal and Ward, Lauren and Murphy, Damian T and Kearney, Gavin},
      title     = {Predicting the Colouration between Binaural Signals},
      journal   = {Applied Sciences},
      volume    = {12},
      number    = {5},
      pages     = {2441},
      year      = {2022},
      publisher = {MDPI},
    }

  • H. Mi, G. Kearney, and H. Daffern, “Impact thresholds of parameters of binaural room impulse responses (brirs) on perceptual reverberation,” Applied sciences, vol. 12, iss. 6, p. 2823, 2022.
    [BibTeX]
    @article{mi2022impact,
      author    = {Mi, Huan and Kearney, Gavin and Daffern, Helena},
      title     = {Impact Thresholds of Parameters of Binaural Room Impulse Responses (BRIRs) on Perceptual Reverberation},
      journal   = {Applied Sciences},
      volume    = {12},
      number    = {6},
      pages     = {2823},
      year      = {2022},
      publisher = {MDPI},
    }

  • G. Acosta and H. Daffern, “The role of vibrato in choral singing: a systematic review,” Journal of voice, 2022.
    [BibTeX]
    @article{p1,
      author    = {Acosta, Gerardo and Daffern, Helena},
      title     = {The role of vibrato in choral singing: A systematic review},
      journal   = {Journal of Voice},
      year      = {2022},
      publisher = {Elsevier},
    }

  • C. Popp and D. T. Murphy, “Creating audio object-focused acoustic environments for room-scale virtual reality,” Applied sciences, vol. 12, iss. 14, p. 7306, 2022.
    [BibTeX]
    @article{popp2022creating,
      author    = {Popp, Constantin and Murphy, Damian T},
      title     = {Creating Audio Object-Focused Acoustic Environments for Room-Scale Virtual Reality},
      journal   = {Applied Sciences},
      volume    = {12},
      number    = {14},
      pages     = {7306},
      year      = {2022},
      publisher = {MDPI},
    }

  • C. Popp and D. T. Murphy, “Establishment and implementation of guidelines for narrative audio-based room-scale virtual reality using practice-based methods,” in Audio engineering society conference: aes 2022 international audio for virtual and augmented reality conference, 2022.
    [BibTeX]
    @inproceedings{popp2022establishment,
      author       = {Popp, Constantin and Murphy, Damian T},
      title        = {Establishment and Implementation of Guidelines for Narrative Audio-based Room-scale Virtual Reality using Practice-based Methods},
      booktitle    = {Audio Engineering Society Conference: AES 2022 International Audio for Virtual and Augmented Reality Conference},
      year         = {2022},
      organization = {Audio Engineering Society},
    }

  • T. Rudzki, D. Murphy, and G. Kearney, “Xr-based hrtf measurements,” in Audio engineering society conference: aes 2022 international audio for virtual and augmented reality conference, 2022.
    [BibTeX]
    @inproceedings{rudzki2022xr,
      author       = {Rudzki, Tomasz and Murphy, Damian and Kearney, Gavin},
      title        = {XR-based HRTF Measurements},
      booktitle    = {Audio Engineering Society Conference: AES 2022 International Audio for Virtual and Augmented Reality Conference},
      year         = {2022},
      organization = {Audio Engineering Society},
    }

  • J. Williams, S. Shepstone, and D. Murphy, “Understanding immersion in the context of films with spatial audio,” in Audio engineering society conference: aes 2022 international audio for virtual and augmented reality conference, 2022.
    [BibTeX]
    @inproceedings{williams2022understanding,
      author       = {Williams, Joseph and Shepstone, Sven and Murphy, Damian},
      title        = {Understanding Immersion in the Context of Films with Spatial Audio},
      booktitle    = {Audio Engineering Society Conference: AES 2022 International Audio for Virtual and Augmented Reality Conference},
      year         = {2022},
      organization = {Audio Engineering Society},
    }

  • K. Young, T. Sweeney, R. R. Vos, F. Mehendale, and H. Daffern, “Evaluation of noise excitation as a method for detection of hypernasality,” Applied acoustics, vol. 190, p. 108639, 2022.
    [BibTeX]
    @article{young2022evaluation,
      author    = {Young, Kat and Sweeney, Triona and Vos, Rebecca R and Mehendale, Felicity and Daffern, Helena},
      title     = {Evaluation of noise excitation as a method for detection of hypernasality},
      journal   = {Applied Acoustics},
      volume    = {190},
      pages     = {108639},
      year      = {2022},
      publisher = {Elsevier},
    }

2021

  • C. Armstrong and G. Kearney, “Ambisonics understood,” in 3d audio, Routledge, 2021, p. 99–129.
    [BibTeX]
    @incollection{armstrong2021ambisonics,
      author    = {Armstrong, Cal and Kearney, Gavin},
      title     = {Ambisonics understood},
      booktitle = {3D Audio},
      pages     = {99--129},
      year      = {2021},
      publisher = {Routledge},
    }

  • P. Cairns, H. Daffern, and G. Kearney, “Parametric evaluation of ensemble vocal performance using an immersive network music performance audio system,” Journal of the audio engineering society, vol. 69, iss. 12, p. 924–933, 2021.
    [BibTeX]
    @article{cairns2021parametric,
      author    = {Cairns, Patrick and Daffern, Helena and Kearney, Gavin},
      title     = {Parametric Evaluation of Ensemble Vocal Performance Using an Immersive Network Music Performance Audio System},
      journal   = {Journal of the Audio Engineering Society},
      volume    = {69},
      number    = {12},
      pages     = {924--933},
      year      = {2021},
      publisher = {Audio Engineering Society},
    }

  • H. Daffern and A. J. Gully, “Assessing articulatory perturbations during a sung vowel-matching exercise using articulography,” Biomedical signal processing and control, vol. 67, p. 102546, 2021.
    [BibTeX]
    @article{daffern2021assessing,
      author    = {Daffern, Helena and Gully, Amelia Jane},
      title     = {Assessing articulatory perturbations during a sung vowel-matching exercise using articulography},
      journal   = {Biomedical Signal Processing and Control},
      volume    = {67},
      pages     = {102546},
      year      = {2021},
      publisher = {Elsevier},
    }

  • H. Daffern, K. Balmer, and J. Brereton, “Singing together, yet apart: the experience of uk choir members and facilitators during the covid-19 pandemic,” Frontiers in psychology, vol. 12, p. 624474, 2021.
    [BibTeX]
    @article{daffern2021singing,
      author    = {Daffern, Helena and Balmer, Kelly and Brereton, Jude},
      title     = {Singing together, yet apart: the experience of UK choir members and facilitators during the Covid-19 pandemic},
      journal   = {Frontiers in psychology},
      volume    = {12},
      pages     = {624474},
      year      = {2021},
      publisher = {Frontiers Media SA},
    }

  • H. Daffern and S. D’Amario, “Understanding expressive ensemble singing through acoustics,” Together in music: coordination, expression, participation, p. 129, 2021.
    [BibTeX]
    @incollection{daffern2021understanding,
      author    = {Daffern, Helena and D'Amario, Sara},
      title     = {Understanding expressive ensemble singing through acoustics},
      booktitle = {Together in Music: Coordination, expression, participation},
      pages     = {129},
      year      = {2021},
      publisher = {Oxford University Press},
    }

  • T. Keren-Portnoy, H. Daffern, R. A. DePaolis, C. M. Cox, K. I. Brown, F. A. Oxley, and M. Kanaan, “Did i just do that? six-month-olds learn the contingency between their vocalizations and a visual reward in 5 minutes,” Infancy, vol. 26, iss. 6, p. 1057–1075, 2021.
    [BibTeX]
    @article{keren2021did,
      author    = {Keren-Portnoy, Tamar and Daffern, Helena and DePaolis, Rory A and Cox, Christopher MM and Brown, Ken I and Oxley, Florence AR and Kanaan, Mona},
      title     = {Did I just do that? Six-month-olds learn the contingency between their vocalizations and a visual reward in 5 minutes},
      journal   = {Infancy},
      volume    = {26},
      number    = {6},
      pages     = {1057--1075},
      year      = {2021},
      publisher = {Wiley Online Library},
    }

  • M. J. Lopez, G. Kearney, and K. Hofstadter, “Enhancing audio description: inclusive cinematic experiences through sound design,” Journal of audiovisual translation, p. 157–182, 2021.
    [BibTeX]
    @article{lopez2021enhancing,
      author    = {Lopez, Mariana Julieta and Kearney, Gavin and Hofstadter, Krisztian},
      title     = {Enhancing audio description: Inclusive cinematic experiences through sound design},
      journal   = {Journal of Audiovisual Translation},
      pages     = {157--182},
      year      = {2021},
      publisher = {York},
    }

  • R. Timmers, F. Bailes, and H. Daffern, Together in music: coordination, expression, participation, Oxford University Press, 2021.
    [BibTeX]
    @book{timmers2021together,
      author    = {Timmers, Renee and Bailes, Freya and Daffern, Helena},
      title     = {Together in Music: Coordination, expression, participation},
      publisher = {Oxford University Press},
      year      = {2021},
    }

2020

  • B. F. G. Katz, D. Murphy, and A. Farina, “The past has ears (phe): xr explorations of acoustic spaces as cultural heritage,” in Augmented reality, virtual reality, and computer graphics, Cham: Springer International Publishing, 2020, p. 91–98.
    [BibTeX] [Abstract]

    Hearing is one of our most pervasive senses. There is no equivalent to closing our eyes, or averting our gaze, for the ears. When we think about great architectural achievements in European history, such as ancient amphitheatres or Gothic cathedrals, their importance is strongly tied to their acoustic environment. The acoustics of a heritage site is an intangible consequence of the space’s tangible construction and furnishings. Inspired by the project’s namesake (Phé, for the constellation Phoenix), and the relatively recent fires at Cathédrale de Notre Dame de Paris and Teatro La Fenice opera hall, the PHE project focuses on virtual reconstruction of heritage sites, bringing them back from the ashes. In addressing the intangible acoustic heritage of architectural sites, three main objectives have been identified for this research project: Documentation, Modelling, and Presentation. In parallel, three heritage sites are participating as case studies: Tindari Theatre (IT), Notre-Dame de Paris Cathedral (FR), and The Houses of Parliament (UK). The acoustics of a space is immersive, spatial, and due to the nature of auditory perception egocentric, in contrast to visual perception of an object, which can be observed from “outside”. Consequently, presentation methods for communicating acoustic heritage must represent the spatially immersive and listener-centric nature of acoustics. PHE will lead development of a museum grade hardware/software prototype for the presentation of immersive audio experiences adaptable to multiple platforms, from off-site immersive speaker installations, to mobile XR via smartphone applications.

    @inproceedings{10.1007/978-3-030-58468-9_7,
      author    = {Katz, Brian F. G. and Murphy, Damian and Farina, Angelo},
      editor    = {De Paolis, Lucio Tommaso and Bourdot, Patrick},
      title     = {The Past Has Ears (PHE): XR Explorations of Acoustic Spaces as Cultural Heritage},
      booktitle = {Augmented Reality, Virtual Reality, and Computer Graphics},
      year      = {2020},
      publisher = {Springer International Publishing},
      address   = {Cham},
      pages     = {91--98},
      abstract  = {Hearing is one of our most pervasive senses. There is no equivalent to closing our eyes, or averting our gaze, for the ears. When we think about great architectural achievements in European history, such as ancient amphitheatres or Gothic cathedrals, their importance is strongly tied to their acoustic environment. The acoustics of a heritage site is an intangible consequence of the space's tangible construction and furnishings. Inspired by the project's namesake (Ph{\'e}, for the constellation Phoenix), and the relatively recent fires at Cath{\'e}drale de Notre Dame de Paris and Teatro La Fenice opera hall, the PHE project focuses on virtual reconstruction of heritage sites, bringing them back from the ashes. In addressing the intangible acoustic heritage of architectural sites, three main objectives have been identified for this research project: Documentation, Modelling, and Presentation. In parallel, three heritage sites are participating as case studies: Tindari Theatre (IT), Notre-Dame de Paris Cathedral (FR), and The Houses of Parliament (UK). The acoustics of a space is immersive, spatial, and due to the nature of auditory perception egocentric, in contrast to visual perception of an object, which can be observed from ``outside''. Consequently, presentation methods for communicating acoustic heritage must represent the spatially immersive and listener-centric nature of acoustics. PHE will lead development of a museum grade hardware/software prototype for the presentation of immersive audio experiences adaptable to multiple platforms, from off-site immersive speaker installations, to mobile XR via smartphone applications.},
      isbn      = {978-3-030-58468-9},
    }

  • B. F. G. Katz, D. Murphy, and A. Farina, “Exploring cultural heritage through acoustic digital reconstructions,” Physics today, vol. 73, iss. 12, pp. 32-37, 2020. doi:10.1063/PT.3.4633
    [BibTeX] [Abstract] [Download PDF]

    {The fire at Notre Dame Cathedral in Paris in 2019 and the one at Gran Teatro La Fenice opera hall in Venice in 1996 are reminders of the fragile nature of humanity’s cultural heritage. Fortunately, acoustic measurements, numerical simulations, and digital reconstructions can recover—and to some extent preserve—the sound of humanity’s great architectural sites. What’s more, those techniques provide a way for archaeologists, historians, musicologists, and the general public to experience the lost acoustics of damaged or destroyed places.}

    @article{10.1063/PT.3.4633,
      author  = {Katz, Brian F. G. and Murphy, Damian and Farina, Angelo},
      title   = {Exploring cultural heritage through acoustic digital reconstructions},
      journal = {Physics Today},
      volume  = {73},
      number  = {12},
      pages   = {32--37},
      year    = {2020},
      month   = dec,
      abstract = {The fire at Notre Dame Cathedral in Paris in 2019 and the one at Gran Teatro La Fenice opera hall in Venice in 1996 are reminders of the fragile nature of humanity’s cultural heritage. Fortunately, acoustic measurements, numerical simulations, and digital reconstructions can recover—and to some extent preserve—the sound of humanity’s great architectural sites. What’s more, those techniques provide a way for archaeologists, historians, musicologists, and the general public to experience the lost acoustics of damaged or destroyed places.},
      issn    = {0031-9228},
      doi     = {10.1063/PT.3.4633},
      url     = {https://doi.org/10.1063/PT.3.4633},
      eprint  = {https://pubs.aip.org/physicstoday/article-pdf/73/12/32/10123628/32\_1\_online.pdf},
    }

  • J. Brereton, H. Daffern, K. Young, and M. Lovedee-Turner, “Addressing Gender Equality in Music Production: Current Challenges, Opportunities for Change, and Recommendations,” in Gender in music production, R. Hepworth-Sawyer, J. Hodgson, L. King, and M. Marrington, Eds., Routledge, 2020.
    [BibTeX] [Download PDF]
    @incollection{Brereton2020,
      author    = {Brereton, Jude and Daffern, Helena and Young, Kat and Lovedee-Turner, Michael},
      title     = {Addressing Gender Equality in Music Production: Current Challenges, Opportunities for Change, and Recommendations},
      booktitle = {Gender in Music Production},
      publisher = {Routledge},
      year      = {2020},
      editor    = {Hepworth-Sawyer, Russ and Hodgson, Jay and King, Liesl and Marrington, Mark},
      chapter   = {14},
      url       = {https://www.taylorfrancis.com/books/e/9780429464515/chapters/10.4324/9780429464515-18},
    }

  • J. Brereton, H. Daffern, M. Green, F. Stevens, and A. Hunt, “Enhancing Student Employability Through Innovative Programme Design: A Case Study,” in Audio education: theory, culture, and practice, D. Walzer and M. Lopez, Eds., Routledge, 2020.
    [BibTeX] [Download PDF]
    @incollection{Brereton2020a,
      author    = {Brereton, Jude and Daffern, Helena and Green, Marc and Stevens, Frank and Hunt, Andy},
      title     = {Enhancing Student Employability Through Innovative Programme Design: A Case Study},
      booktitle = {Audio Education: Theory, Culture, and Practice},
      publisher = {Routledge},
      year      = {2020},
      editor    = {Walzer, Daniel and Lopez, Mariana},
      url       = {https://www.routledge.com/Audio-Education-Theory-Culture-and-Practice/Walzer-Lopez/p/book/9780367074449},
    }

  • P. Cairns, H. Daffern, and G. Kearney, “Immersive network music performance: design and practical deployment of a system for immersive vocal performance,” in Audio engineering society convention 149, 2020.
    [BibTeX]
    @inproceedings{cairns2020immersive,
      author       = {Cairns, Patrick and Daffern, Helena and Kearney, Gavin},
      title        = {Immersive network music performance: Design and practical deployment of a system for immersive vocal performance},
      booktitle    = {Audio Engineering Society Convention 149},
      year         = {2020},
      organization = {Audio Engineering Society},
    }

  • D. A. Camlin, H. Daffern, and K. Zeserson, “Group singing as a resource for the development of a healthy public: a study of adult group singing,” Humanities and social sciences communications, vol. 7, iss. 1, p. 1–15, 2020.
    [BibTeX]
    @article{camlin2020group,
      author    = {Camlin, David A and Daffern, Helena and Zeserson, Katherine},
      title     = {Group singing as a resource for the development of a healthy public: a study of adult group singing},
      journal   = {Humanities and Social Sciences Communications},
      volume    = {7},
      number    = {1},
      pages     = {1--15},
      year      = {2020},
      publisher = {Palgrave},
    }

  • H. Daffern, T. Keren-Portnoy, R. A. DePaolis, and K. I. Brown, “Babbleplay: an app for infants, controlled by infants, to improve early language outcomes,” Applied acoustics, vol. 162, p. 107183, 2020.
    [BibTeX]
    @article{daffern2020babbleplay,
      author    = {Daffern, Helena and Keren-Portnoy, Tamar and DePaolis, Rory A and Brown, Kenneth I},
      title     = {BabblePlay: An app for infants, controlled by infants, to improve early language outcomes},
      journal   = {Applied Acoustics},
      volume    = {162},
      pages     = {107183},
      year      = {2020},
      publisher = {Elsevier},
    }

  • M. C. Green and D. T. Murphy, “Environmental Sound Monitoring Using Machine Learning on Mobile Devices,” Applied Acoustics, vol. 159, 2020. doi:10.1016/j.apacoust.2019.107041
    [BibTeX]
    @article{Green2020,
      author  = {M. C. Green and D. T. Murphy},
      title   = {Environmental Sound Monitoring Using Machine Learning on Mobile Devices},
      journal = {Applied Acoustics},
      year    = {2020},
      volume  = {159},
      doi     = {10.1016/j.apacoust.2019.107041},
    }

  • D. Johnston, H. Egermann, and G. Kearney, “SoundFields: A Virtual Reality Game Designed to Address Auditory Hypersensitivity in Individuals with Autism Spectrum Disorder,” Applied sciences, vol. 10, iss. 9, p. 2996, 2020. doi:10.3390/app10092996
    [BibTeX]
    @article{Johnston2020,
      author    = {Johnston, Daniel and Egermann, Hauke and Kearney, Gavin},
      title     = {{SoundFields}: A Virtual Reality Game Designed to Address Auditory Hypersensitivity in Individuals with {Autism Spectrum Disorder}},
      journal   = {Applied Sciences},
      year      = {2020},
      volume    = {10},
      number    = {9},
      pages     = {2996},
      doi       = {10.3390/app10092996},
      publisher = {Multidisciplinary Digital Publishing Institute},
    }

  • M. Lopez, G. Kearney, K. Hofstädter, and G. Balla, “Enhancing audio description: accessible filmmaking, sound design and the importance of educating filmmakers,” Media practice and education, vol. 21, iss. 4, p. 289–304, 2020.
    [BibTeX]
    @article{lopez2020enhancing,
      author    = {Lopez, Mariana and Kearney, Gavin and Hofst{\"a}dter, Krisztian and Balla, Gianluca},
      title     = {Enhancing audio description: accessible filmmaking, sound design and the importance of educating filmmakers},
      journal   = {Media Practice and Education},
      volume    = {21},
      number    = {4},
      pages     = {289--304},
      year      = {2020},
      publisher = {Taylor \& Francis},
    }

  • T. Rudzki, D. Murphy, and G. Kearney, “On the measurement of perceived lateral angle using eye tracking,” in Audio engineering society conference: 2020 aes international conference on audio for virtual and augmented reality, 2020.
    [BibTeX]
    @inproceedings{rudzki2020measurement,
      author       = {Rudzki, Tomasz and Murphy, Damian and Kearney, Gavin},
      title        = {On the Measurement of Perceived Lateral Angle Using Eye Tracking},
      booktitle    = {Audio Engineering Society Conference: 2020 AES International Conference on Audio for Virtual and Augmented Reality},
      year         = {2020},
      organization = {Audio Engineering Society},
    }

  • B. Tsui, W. A. Smith, and G. Kearney, “Low-order spherical harmonic hrtf restoration using a neural network approach,” Applied sciences, vol. 10, iss. 17, p. 5764, 2020.
    [BibTeX]
    @article{tsui2020low,
      author    = {Tsui, Benjamin and Smith, William AP and Kearney, Gavin},
      title     = {Low-Order Spherical Harmonic HRTF Restoration Using a Neural Network Approach},
      journal   = {Applied Sciences},
      volume    = {10},
      number    = {17},
      pages     = {5764},
      year      = {2020},
      publisher = {MDPI},
    }

  • O. Wilde and G. Kearney, “Cross-modal investigations for improving sound localisation accuracy: a mounted vibrotactile headset design,” in Audio engineering society convention 149, 2020.
    [BibTeX]
    @inproceedings{wilde2020cross,
      author       = {Wilde, Oliver and Kearney, Gavin},
      title        = {Cross-Modal Investigations For Improving Sound Localisation Accuracy: A Mounted Vibrotactile Headset Design},
      booktitle    = {Audio Engineering Society Convention 149},
      year         = {2020},
      organization = {Audio Engineering Society},
    }

  • A. M. Cassorla, G. Kearney, A. Hunt, H. Riaz, M. Stiles, and D. T. Murphy, “Augmented Reality for DAW-Based Spatial Audio Creation Using Smartphones,” in Audio Engineering Society 148th Convention, 2020.
    [BibTeX] [Download PDF]
    @inproceedings{cassorla2020augmented,
      author    = {Cassorla, Adri{\`a} M. and Kearney, Gavin and Hunt, Andy and Riaz, Hashim and Stiles, Mirek and Murphy, Damian T.},
      title     = {Augmented Reality for {DAW-Based} Spatial Audio Creation Using Smartphones},
      booktitle = {Audio Engineering Society 148th Convention},
      year      = {2020},
      url       = {http://www.aes.org/e-lib/browse.cfm?elib=20761},
    }

  • D. Turner, C. Pike, and D. Murphy, “Content Matching for Sound Generating Objects within a Visual Scene using a Computer Vision Approach,” in Audio Engineering Society 148th Convention, 2020.
    [BibTeX] [Download PDF]
    @inproceedings{turner2020content,
      author    = {Turner, Daniel and Pike, Chris and Murphy, Damian},
      title     = {Content Matching for Sound Generating Objects within a Visual Scene using a Computer Vision Approach},
      booktitle = {Audio Engineering Society 148th Convention},
      year      = {2020},
      url       = {http://www.aes.org/e-lib/browse.cfm?elib=20792},
    }

2019

  • T. Rudzki, I. Gomez-Lanzaco, J. Stubbs, J. Skoglund, D. T. Murphy, and G. Kearney, “Auditory Localization in Low-Bitrate Compressed Ambisonic Scenes,” Applied Sciences, vol. 9, iss. 13, 2019. doi:10.3390/app9132618
    [BibTeX] [Download PDF]
    @article{app9132618,
      author         = {T. Rudzki and I. Gomez-Lanzaco and J. Stubbs and J. Skoglund and D. T. Murphy and G. Kearney},
      title          = {Auditory Localization in Low-Bitrate Compressed {Ambisonic} Scenes},
      journal        = {Applied Sciences},
      year           = {2019},
      volume         = {9},
      number         = {13},
      issn           = {2076-3417},
      article-number = {2618},
      doi            = {10.3390/app9132618},
      url            = {https://www.mdpi.com/2076-3417/9/13/2618},
    }

  • D. Johnston, H. Egermann, and G. Kearney, “Measuring the Behavioral Response to Spatial Audio within a Multi-Modal Virtual Reality Environment in Children with Autism Spectrum Disorder,” Applied Sciences, vol. 9, iss. 15, 2019. doi:10.3390/app9153152
    [BibTeX] [Download PDF]
    @article{app9153152,
      author         = {Johnston, Daniel and Egermann, Hauke and Kearney, Gavin},
      title          = {{Measuring the Behavioral Response to Spatial Audio within a Multi-Modal Virtual Reality Environment in Children with Autism Spectrum Disorder}},
      journal        = {{Applied Sciences}},
      year           = {2019},
      volume         = {9},
      number         = {15},
      issn           = {2076-3417},
      article-number = {3152},
      doi            = {10.3390/app9153152},
      url            = {https://www.mdpi.com/2076-3417/9/15/3152},
    }

  • A. Delgado Castro and J. Szymanski, “Semi-Automatic Mono-to-Stereo Upmixing via Separation of Note Events,” in Audio Engineering Society International Conference on Immersive and Interactive Audio, 2019.
    [BibTeX] [Download PDF]
    @InProceedings{DelgadoCastro2019,
    author = {Delgado Castro, Alejandro and Szymanski, John},
    title = {{Semi-Automatic Mono-to-Stereo Upmixing via Separation of Note Events}},
    booktitle = {{Audio Engineering Society International Conference on Immersive and Interactive Audio}},
    year = {2019},
    month = mar,
    url = {http://www.aes.org/e-lib/browse.cfm?elib=20410},
    }

  • A. Delgado Castro, “Iterative Separation of Note Events from Single-Channel Polyphonic Recordings,” PhD Thesis, 2019.
    [BibTeX] [Download PDF]
    @phdthesis{DelgadoCastro2019a,
      author = {Delgado Castro, Alejandro},
      title  = {{Iterative Separation of Note Events from Single-Channel Polyphonic Recordings}},
      school = {Department of Electronic Engineering, University of York},
      year   = {2019},
      url    = {http://etheses.whiterose.ac.uk/25239/},
    }

  • A. Delgado Castro and J. Szymanski, “Semi-supervised audio source separation based on the iterative estimation and extraction of note events,” in 16th international joint conference on e-business and telecommunications – volume 1: sigmap, 2019. doi:10.5220/0007828002730279
    [BibTeX]
    @inproceedings{DelgadoCastro2019b,
      author    = {{Delgado Castro}, Alejandro and Szymanski, John},
      title     = {Semi-supervised Audio Source Separation based on the Iterative Estimation and Extraction of Note Events},
      booktitle = {16th International Joint Conference on e-Business and Telecommunications - Volume 1: SIGMAP},
      year      = {2019},
      doi       = {10.5220/0007828002730279},
    }

  • C. Görres and D. Chesmore, “Active sound production of scarab beetle larvae opens up new possibilities for species-specific pest monitoring in soils,” Nature Scientific Reports, vol. 9, iss. 1, p. 10115, 2019. doi:10.1038/s41598-019-46121-y
    [BibTeX]
    @Article{Gorres2019,
    author = {G{\"o}rres, Carolyn-Monika and Chesmore, David},
    title = {Active sound production of scarab beetle larvae opens up new possibilities for species-specific pest monitoring in soils},
    journal = {{Nature Scientific Reports}},
    year = {2019},
    volume = {9},
    number = {1},
    pages = {10115},
    doi = {10.1038/s41598-019-46121-y},
    }

  • M. C. Green, D. T. Murphy, S. Adavanne, and T. Virtanen, “Acoustic Scene Classification Using Higher-Order Ambisonic Features,” in IEEE Workshop on Applications of Signal Processing to Audio and Acoustics (WASPAA), 2019. doi:10.1109/WASPAA.2019.8937282
    [BibTeX]
    @InProceedings{Green2019,
    author = {M. C. Green and D. T. Murphy and S. Adavanne and T. Virtanen},
    title = {{Acoustic Scene Classification Using Higher-Order Ambisonic Features}},
    booktitle = {{IEEE Workshop on Applications of Signal Processing to Audio and Acoustics (WASPAA)}},
    year = {2019},
    month = oct,
    doi = {10.1109/WASPAA.2019.8937282},
    }

  • M. C. Green and D. T. Murphy, “Sound Source Localisation in Ambisonic Audio Using Peak Clustering,” in Detection and Classification of Acoustic Scenes and Events (DCASE), 2019.
    [BibTeX] [Download PDF]
    @InProceedings{Green2019b,
    author = {M. C. Green and D. T. Murphy},
    title = {{Sound Source Localisation in Ambisonic Audio Using Peak Clustering}},
    booktitle = {{Detection and Classification of Acoustic Scenes and Events (DCASE)}},
    year = {2019},
    month = oct,
    url = {http://dcase.community/documents/workshop2019/proceedings/DCASE2019Workshop_Green_18.pdf},
    }

  • D. Johnston, B. Tsui, and G. Kearney, “SALTE Pt. 1: A Virtual Reality Tool for Streamlined and Standardized Spatial Audio Listening Tests,” in Audio Engineering Society 147th Convention, 2019.
    [BibTeX] [Download PDF]
    @InProceedings{Johnston2019,
    author = {Johnston, Daniel and Tsui, Benjamin and Kearney, Gavin},
    title = {{SALTE Pt. 1: A Virtual Reality Tool for Streamlined and Standardized Spatial Audio Listening Tests}},
    booktitle = {{Audio Engineering Society 147th Convention}},
    year = {2019},
    month = oct,
    url = {http://www.aes.org/e-lib/browse.cfm?elib=20559},
    }

  • D. Johnston, H. Egermann, and G. Kearney, “An Interactive Spatial Audio Experience for Children with Autism Spectrum Disorder,” in Audio Engineering Society International Conference on Immersive and Interactive Audio, 2019.
    [BibTeX] [Download PDF]
    @inproceedings{johnston2019interactive,
      author       = {Johnston, Daniel and Egermann, Hauke and Kearney, Gavin},
      title        = {{An Interactive Spatial Audio Experience for Children with Autism Spectrum Disorder}},
      booktitle    = {{Audio Engineering Society International Conference on Immersive and Interactive Audio}},
      year         = {2019},
      organization = {Audio Engineering Society},
      url          = {http://www.aes.org/e-lib/browse.cfm?elib=20429},
    }

  • M. Lovedee-Turner and D. Murphy, “Three-dimensional reflector localisation and room geometry estimation using a spherical microphone array,” Journal of the Acoustical Society of America, vol. 146, iss. 5, p. 3339–3352, 2019. doi:10.1121/1.5130569
    [BibTeX]
    @Article{Lovedee-turner2019,
    author = {Lovedee-Turner, Michael and Murphy, Damian},
    title = {{Three-dimensional reflector localisation and room geometry estimation using a spherical microphone array}},
    journal = {{Journal of the Acoustical Society of America}},
    year = {2019},
    volume = {146},
    number = {5},
    pages = {3339--3352},
    month = nov,
    doi = {10.1121/1.5130569},
    }

  • M. Lovedee-Turner, “Three-Dimensional Geometry Inference of Convex and Non-Convex Rooms using Spatial Room Impulse Responses,” PhD Thesis, 2019.
    [BibTeX] [Download PDF]
    @phdthesis{Lovedee-Turner2020,
      author = {Lovedee-Turner, Michael},
      title  = {{Three-Dimensional Geometry Inference of Convex and Non-Convex Rooms using Spatial Room Impulse Responses}},
      school = {Department of Electronic Engineering, University of York},
      year   = {2019},
      url    = {http://etheses.whiterose.ac.uk/26618/},
    }

  • T. McKenzie, D. T. Murphy, and G. Kearney, “Interaural Level Difference Optimisation of First-Order Binaural Ambisonic Rendering,” in Audio Engineering Society International Conference on Immersive and Interactive Audio, York, UK, 2019.
    [BibTeX]
    @inproceedings{McKenzie2019,
      author    = {McKenzie, T. and Murphy, D. T. and Kearney, G.},
      title     = {{Interaural Level Difference Optimisation of First-Order Binaural Ambisonic Rendering}},
      booktitle = {{Audio Engineering Society International Conference on Immersive and Interactive Audio}},
      year      = {2019},
      address   = {York, UK},
    }

  • T. Mckenzie, D. Murphy, and G. Kearney, “An evaluation of pre-processing techniques for virtual loudspeaker binaural ambisonic rendering,” in Eaa spatial audio signal processing symposium, 2019, p. 149–154.
    [BibTeX]
    @InProceedings{mckenzie2019evaluation,
    author = {Mckenzie, Thomas and Murphy, Damian and Kearney, Gavin},
    title = {An evaluation of pre-processing techniques for virtual loudspeaker binaural ambisonic rendering},
    booktitle = {{EAA Spatial Audio Signal Processing Symposium}},
    year = {2019},
    pages = {149--154},
    }

  • T. Mckenzie, D. Murphy, and G. Kearney, “Towards a perceptually optimal bias factor for directional bias equalisation of binaural ambisonic rendering,” in Eaa spatial audio signal processing symposium, 2019, p. 97–102.
    [BibTeX]
    @InProceedings{mckenzie2019towards,
    author = {Mckenzie, Thomas and Murphy, Damian and Kearney, Gavin},
    title = {Towards a perceptually optimal bias factor for directional bias equalisation of binaural ambisonic rendering},
    booktitle = {{EAA Spatial Audio Signal Processing Symposium}},
    year = {2019},
    pages = {97--102},
    }

  • T. McKenzie, “High Frequency Reproduction in Binaural Ambisonic Rendering,” PhD Thesis, 2019.
    [BibTeX]
    @phdthesis{McKenzie2020,
      author = {McKenzie, Thomas},
      title  = {{High Frequency Reproduction in Binaural Ambisonic Rendering}},
      school = {Department of Electronic Engineering, University of York},
      year   = {2019},
    }

  • H. Ogden, J. Stubbs, and G. Kearney, “A Test Database for the Assessment of Immersive Audio Systems,” in Audio Engineering Society International Conference on Immersive and Interactive Audio, 2019.
    [BibTeX] [Download PDF]
    @InProceedings{Ogden2019,
    author = {Ogden, Harry and Stubbs, Jess and Kearney, Gavin},
    title = {{A Test Database for the Assessment of Immersive Audio Systems}},
    booktitle = {{Audio Engineering Society International Conference on Immersive and Interactive Audio}},
    year = {2019},
    month = mar,
    url = {http://www.aes.org/e-lib/browse.cfm?elib=20401},
    }

  • C. Pike, “Evaluating the Perceived Quality of Binaural Technology,” PhD Thesis, 2019.
    [BibTeX] [Download PDF]
    @phdthesis{Pike2019,
      author = {Pike, Christopher},
      title  = {{Evaluating the Perceived Quality of Binaural Technology}},
      school = {Department of Electronic Engineering, University of York},
      year   = {2019},
      url    = {http://etheses.whiterose.ac.uk/24022/},
    }

  • J. Rees-Jones and H. Daffern, “The Hills are Alive: Capturing and Presenting an Outdoor Choral Performance for Virtual Reality,” in Audio Engineering Society International Conference on Immersive and Interactive Audio, 2019.
    [BibTeX] [Download PDF]
    @InProceedings{Rees-Jones2019,
    author = {Rees-Jones, Joe and Daffern, Helena},
    title = {{The Hills are Alive: Capturing and Presenting an Outdoor Choral Performance for Virtual Reality}},
    booktitle = {{Audio Engineering Society International Conference on Immersive and Interactive Audio}},
    year = {2019},
    month = mar,
    url = {http://www.aes.org/e-lib/browse.cfm?elib=20447},
    }

  • T. Rudzki, C. Earnshaw, D. Murphy, and G. Kearney, “SALTE Pt. 2: On the Design of the SALTE Audio Rendering Engine for Spatial Audio Listening Tests in VR,” in Audio Engineering Society 147th Convention, 2019.
    [BibTeX] [Download PDF]
    @InProceedings{Rudzki2019,
    author = {Rudzki, Tomasz and Earnshaw, Chris and Murphy, Damian and Kearney, Gavin},
    title = {{SALTE Pt. 2: On the Design of the SALTE Audio Rendering Engine for Spatial Audio Listening Tests in VR}},
    booktitle = {{Audio Engineering Society 147th Convention}},
    year = {2019},
    month = oct,
    url = {http://www.aes.org/e-lib/browse.cfm?elib=20560},
    }

  • T. Rudzki, P. Hening, I. Gomez-Lanzaco, J. Stubbs, T. McKenzie, J. Skoglund, D. Murphy, and G. Kearney, “Perceptual Evaluation of Bitrate Compressed Ambisonic Scenes in Loudspeaker Based Reproduction,” in Audio Engineering Society International Conference on Immersive and Interactive Audio, 2019.
    [BibTeX] [Download PDF]
    @InProceedings{rudzki2019pesacls,
    author = {Rudzki, Tomasz and Hening, Pierce and Gomez-Lanzaco, Ignacio and Stubbs, Jessica and McKenzie, Thomas and Skoglund, Jan and Murphy, Damian and Kearney, Gavin},
    title = {{Perceptual Evaluation of Bitrate Compressed Ambisonic Scenes in Loudspeaker Based Reproduction}},
    booktitle = {{Audio Engineering Society International Conference on Immersive and Interactive Audio}},
    year = {2019},
    month = mar,
    url = {http://www.aes.org/e-lib/browse.cfm?elib=20391},
    }

  • A. Southern, D. T. Murphy, and L. Savioja, “Boundary absorption approximation in the spatial high-frequency extrapolation method for parametric room impulse response synthesis,” The Journal of the Acoustical Society of America, vol. 145, iss. 4, p. 2770–2782, 2019. doi:10.1121/1.5096162
    [BibTeX]
    @Article{Southern2019,
    author = {Alex Southern and Damian T. Murphy and Lauri Savioja},
    title = {Boundary absorption approximation in the spatial high-frequency extrapolation method for parametric room impulse response synthesis},
    journal = {{The Journal of the Acoustical Society of America}},
    year = {2019},
    volume = {145},
    number = {4},
    pages = {2770--2782},
    month = apr,
    doi = {10.1121/1.5096162},
    publisher = {Acoustical Society of America ({ASA})},
    }

  • A. Southern, F. Stevens, and D. Murphy, “Analysing the effectiveness of approaches to auralisation for applications in environmental acoustics,” in International Congress on Acoustics, 2019.
    [BibTeX] [Download PDF]
    @InProceedings{southernanalysing,
    author = {Southern, Alex and Stevens, Frank and Murphy, Damian},
    title = {Analysing the effectiveness of approaches to auralisation for applications in environmental acoustics},
    booktitle = {{International Congress on Acoustics}},
    year = {2019},
    month = sep,
    url = {http://pub.dega-akustik.de/ICA2019/data/articles/001408.pdf},
    }

  • K. Young, C. Armstrong, A. I. Tew, D. T. Murphy, and G. Kearney, “A Numerical Study into Perceptually-Weighted Spectral Differences between Differently-Spaced HRTFs,” in Audio Engineering Society International Conference on Immersive and Interactive Audio, York, 2019.
    [BibTeX] [Download PDF]
    @inproceedings{Young2019,
      author    = {Young, K. and Armstrong, C. and Tew, A. I. and Murphy, D. T. and Kearney, G.},
      title     = {{A Numerical Study into Perceptually-Weighted Spectral Differences between Differently-Spaced HRTFs}},
      booktitle = {{Audio Engineering Society International Conference on Immersive and Interactive Audio}},
      year      = {2019},
      address   = {York},
      publisher = {Audio Engineering Society},
      url       = {http://www.aes.org/e-lib/browse.cfm?elib=20386},
    }

  • K. Young, “Investigating the Feasibility of a Near-Field Binaural Loudspeaker System,” PhD Thesis, 2019.
    [BibTeX]
    @phdthesis{Young2020,
      author = {Young, Kat},
      title  = {{Investigating the Feasibility of a Near-Field Binaural Loudspeaker System}},
      school = {Department of Electronic Engineering, University of York},
      year   = {2019},
    }

  • C. Armstrong, “Improvements in the Measurement and Optimisation of Head Related Transfer Functions for Binaural Ambisonics,” PhD Thesis, 2019.
    [BibTeX]
    @PhdThesis{Armstrong2019,
    author = {Armstrong, Cal},
    title = {{Improvements in the Measurement and Optimisation of Head Related Transfer Functions for Binaural Ambisonics}},
    school = {Department of Electronic Engineering, University of York},
    year = {2019},
    }

2018

  • C. Armstrong, L. Thresh, D. T. Murphy, and G. Kearney, “A Perceptual Evaluation of Individual and Non-Individual HRTFs: A Case Study of the SADIE II Database,” Applied Sciences, vol. 8, iss. 11, 2018. doi:10.3390/app8112029
    [BibTeX]
    @Article{Armstrong2018,
    author = {C. Armstrong and L. Thresh and D. T. Murphy and G. Kearney},
    title = {{A Perceptual Evaluation of Individual and Non-Individual HRTFs: A Case Study of the SADIE II Database}},
    journal = {{Applied Sciences}},
    year = {2018},
    volume = {8},
    number = {11},
    doi = {10.3390/app8112029},
    keywords = {binaural,database,evaluation,hrtf,measurement,perception,spatial audio,timbre},
    publisher = {MDPI},
    }

  • C. Armstrong, D. T. Murphy, and G. Kearney, “A Bi-RADIAL Approach to Ambisonics,” in Audio Engineering Society International Conference on Audio for Virtual and Augmented Reality (AVAR), 2018.
    [BibTeX] [Download PDF]
    @inproceedings{Armstrong2018a,
      author    = {Armstrong, C. and Murphy, D. T. and Kearney, G.},
      title     = {{A Bi-RADIAL Approach to Ambisonics}},
      booktitle = {{Audio Engineering Society International Conference on Audio for Virtual and Augmented Reality (AVAR)}},
      year      = {2018},
      url       = {http://www.aes.org/e-lib/browse.cfm?elib=19690},
    }

  • C. Armstrong, T. Mckenzie, D. T. Murphy, and G. Kearney, “A Perceptual Spectral Difference Model for Binaural Signals,” in Audio Engineering Society 145th Convention, 2018.
    [BibTeX] [Download PDF]
    @inproceedings{Armstrong2018b,
      author    = {Armstrong, C. and Mckenzie, T. and Murphy, D. T. and Kearney, G.},
      title     = {{A Perceptual Spectral Difference Model for Binaural Signals}},
      booktitle = {{Audio Engineering Society 145th Convention}},
      year      = {2018},
      url       = {http://www.aes.org/e-lib/browse.cfm?elib=19722},
    }

  • S. D’Amario, D. M. Howard, H. Daffern, and N. Pennill, “A Longitudinal Study of Intonation in an a cappella Singing Quintet,” Journal of Voice, vol. 34, iss. 1, 2018. doi:10.1016/j.jvoice.2018.07.015
    [BibTeX]
    @article{d2018,
      author    = {D'Amario, S. and Howard, D. M. and Daffern, H. and Pennill, N.},
      title     = {{A Longitudinal Study of Intonation in an a cappella Singing Quintet}},
      journal   = {{Journal of Voice}},
      year      = {2018},
      volume    = {34},
      number    = {1},
      doi       = {10.1016/j.jvoice.2018.07.015},
      publisher = {Elsevier},
    }

  • S. D’Amario, H. Daffern, and F. Bailes, “A longitudinal study investigating synchronization in a singing quintet,” Journal of Voice, 2018. doi:10.1016/j.jvoice.2018.06.011
    [BibTeX]
    @article{d2018a,
      author    = {D'Amario, S. and Daffern, H. and Bailes, F.},
      title     = {A longitudinal study investigating synchronization in a singing quintet},
      journal   = {{Journal of Voice}},
      year      = {2018},
      doi       = {10.1016/j.jvoice.2018.06.011},
      publisher = {Elsevier},
    }

  • S. D’Amario, H. Daffern, and F. Bailes, “Synchronization in singing duo performances: The roles of visual contact and leadership instruction,” Frontiers in Psychology, vol. 9, 2018. doi:10.3389/fpsyg.2018.01208
    [BibTeX]
    @article{d2018b,
      author    = {D'Amario, S. and Daffern, H. and Bailes, F.},
      title     = {{Synchronization in singing duo performances: The roles of visual contact and leadership instruction}},
      journal   = {{Frontiers in Psychology}},
      year      = {2018},
      volume    = {9},
      doi       = {10.3389/fpsyg.2018.01208},
      publisher = {Frontiers Media SA},
    }

  • S. D’Amario, H. Daffern, and F. Bailes, “A new method of onset and offset detection in ensemble singing,” Logopedics Phoniatrics Vocology, p. 1–16, 2018. doi:10.1080/14015439.2018.1452977
    [BibTeX]
    @article{d2018c,
      author    = {D'Amario, S. and Daffern, H. and Bailes, F.},
      title     = {A new method of onset and offset detection in ensemble singing},
      journal   = {{Logopedics Phoniatrics Vocology}},
      year      = {2018},
      pages     = {1--16},
      doi       = {10.1080/14015439.2018.1452977},
      publisher = {Taylor \& Francis},
    }

  • H. Daffern, D. A. Camlin, H. Egermann, A. J. Gully, G. Kearney, C. Neale, and J. Rees-Jones, “Exploring the potential of virtual reality technology to investigate the health and well being benefits of group singing,” International Journal of Performance Arts and Digital Media, p. 1–22, 2018.
    [BibTeX]
    @article{Daffern2018,
      author    = {Daffern, H. and Camlin, D. A. and Egermann, H. and Gully, A. J. and Kearney, G. and Neale, C. and Rees-Jones, J.},
      title     = {Exploring the potential of virtual reality technology to investigate the health and well being benefits of group singing},
      journal   = {{International Journal of Performance Arts and Digital Media}},
      year      = {2018},
      pages     = {1--22},
      publisher = {Taylor \& Francis},
    }

  • S. D’Amario, “Interpersonal synchronization in ensemble singing: the roles of visual contact and leadership, and evolution across rehearsals,” PhD Thesis, 2018.
    [BibTeX] [Download PDF]
    @phdthesis{DAmario2018,
      author = {D'Amario, Sara},
      title  = {Interpersonal synchronization in ensemble singing: the roles of visual contact and leadership, and evolution across rehearsals},
      school = {Department of Electronic Engineering, University of York},
      year   = {2018},
      url    = {http://etheses.whiterose.ac.uk/22547/},
    }

  • A. J. Gully, H. Daffern, and D. T. Murphy, “Diphthong synthesis using the dynamic 3d digital waveguide mesh,” IEEE/ACM Transactions on Audio, Speech, and Language Processing, vol. 26, iss. 2, p. 243–255, 2018.
    [BibTeX]
    @Article{gully2018,
    author = {A. J. Gully and H. Daffern and D. T. Murphy},
    title = {Diphthong synthesis using the dynamic {3D} digital waveguide mesh},
    journal = {{IEEE/ACM Transactions on Audio, Speech, and Language Processing}},
    year = {2018},
    volume = {26},
    number = {2},
    pages = {243--255},
    publisher = {IEEE},
    }

  • L. Hobden, “The Morphologically Informed Perceptual Enhancement of Spatial Audio,” PhD Thesis, 2018.
    [BibTeX] [Download PDF]
    @phdthesis{Hobden2018,
      author = {Hobden, Laurence},
      title  = {{The Morphologically Informed Perceptual Enhancement of Spatial Audio}},
      school = {Department of Electronic Engineering, University of York},
      year   = {2018},
      url    = {http://etheses.whiterose.ac.uk/24126/},
    }

  • D. Johnston, H. Egermann, and G. Kearney, “Innovative computer technology in music based interventions for individuals with autism moving beyond traditional interactive music therapy techniques,” Cogent Psychology, p. 1554773, 2018.
    [BibTeX]
    @article{Johnston2018,
      author    = {Johnston, Daniel and Egermann, Hauke and Kearney, Gavin},
      title     = {Innovative computer technology in music based interventions for individuals with autism moving beyond traditional interactive music therapy techniques},
      journal   = {{Cogent Psychology}},
      year      = {2018},
      pages     = {1554773},
      publisher = {Cogent OA},
    }

  • D. Johnston, H. Egermann, and G. Kearney, “SoundFields: A mixed reality spatial audio game for children with autism spectrum disorder,” in Audio Engineering Society 145th Convention, 2018.
    [BibTeX] [Download PDF]
    @inproceedings{Johnston2018a,
      author       = {Johnston, Daniel and Egermann, Hauke and Kearney, Gavin},
      title        = {Sound{F}ields: {A} mixed reality spatial audio game for children with autism spectrum disorder},
      booktitle    = {{Audio Engineering Society 145th Convention}},
      year         = {2018},
      organization = {Audio Engineering Society},
      url          = {http://www.aes.org/e-lib/browse.cfm?elib=19717},
    }

  • M. J. Lopez, G. Kearney, and K. Hofstadter, “Audio description in the UK: what works, what doesn’t and understanding the need for personalising access,” British Journal of Visual Impairment, p. 1–18, 2018. doi:10.1177/0264619618794750
    [BibTeX] [Abstract]

    Audio Description for film and television is a pre-recorded track that uses verbal descriptions to provide information on visual aspects of a film or TV programme. In the UK it is currently the only accessibility strategy available for visually impaired audiences and although it provides access to a large number of people, its shortcomings also fail to engage others in audiovisual experiences. The Enhancing Audio Description project explores how digital audio technologies can be applied to the creation of alternatives to Audio Description with the aim of personalising access strategies. Such personalisation would allow users to select the method utilised to access audiovisual experiences, by having choices that include traditional forms of accessibility as well as sound design based methods. The present article analyses the results of a survey and focus groups in which visually impaired participants discussed the advantages and disadvantages of AD and it demonstrates not only the diversity of experiences and needs of visually impaired groups but also their eagerness for change.

    @article{Lopez2018,
      author   = {M. J. Lopez and G. Kearney and K. Hofstadter},
      title    = {Audio description in the {U}{K}: what works, what doesn't and understanding the need for personalising access},
      journal  = {{British Journal of Visual Impairment}},
      year     = {2018},
      month    = aug,
      day      = {27},
      pages    = {1--18},
      issn     = {0264-6196},
      doi      = {10.1177/0264619618794750},
      keywords = {Sound design, Accessibility, Film, Visual impairment},
      abstract = {Audio Description for film and television is a pre-recorded track that uses verbal descriptions to provide information on visual aspects of a film or TV programme. In the UK it is currently the only accessibility strategy available for visually impaired audiences and although it provides access to a large number of people, its shortcomings also fail to engage others in audiovisual experiences. The Enhancing Audio Description project explores how digital audio technologies can be applied to the creation of alternatives to Audio Description with the aim of personalising access strategies. Such personalisation would allow users to select the method utilised to access audiovisual experiences, by having choices that include traditional forms of accessibility as well as sound design based methods. The present article analyses the results of a survey and focus groups in which visually impaired participants discussed the advantages and disadvantages of AD and it demonstrates not only the diversity of experiences and needs of visually impaired groups but also their eagerness for change.},
    }

  • T. McKenzie, D. Murphy, and G. Kearney, “Diffuse-field equalisation of binaural ambisonic rendering,” Applied Sciences, vol. 8, iss. 10, 2018. doi:10.3390/app8101956
    [BibTeX]
    @Article{McKenzie2018,
    author = {McKenzie, Thomas and Murphy, Damian and Kearney, Gavin},
    title = {Diffuse-field equalisation of binaural ambisonic rendering},
    journal = {{Applied Sciences}},
    year = {2018},
    volume = {8},
    number = {10},
    issn = {2076-3417},
    doi = {10.3390/app8101956},
    }

  • T. McKenzie, D. Murphy, and G. Kearney, “Directional bias equalisation of first-order binaural ambisonic rendering,” in Audio Engineering Society International Conference on Audio for Virtual and Augmented Reality (AVAR), Redmond, 2018.
    [BibTeX]
    @InProceedings{McKenzie2018a,
    author = {McKenzie, Thomas and Murphy, Damian and Kearney, Gavin},
    title = {Directional bias equalisation of first-order binaural ambisonic rendering},
    booktitle = {{Audio Engineering Society International Conference on Audio for Virtual and Augmented Reality (AVAR)}},
    year = {2018},
    address = {Redmond},
    }

  • D. R. Méndez, C. Armstrong, J. Stubbs, M. Stiles, and G. Kearney, “Practical Recording Techniques for Music Production with Six-Degrees of Freedom Virtual Reality,” in Audio Engineering Society 145th Convention, 2018.
    [BibTeX] [Download PDF]
    @inproceedings{Mendez2018,
      author    = {M{\'{e}}ndez, David R. and Armstrong, Cal and Stubbs, Jessica and Stiles, Mirek and Kearney, Gavin},
      title     = {{Practical Recording Techniques for Music Production with Six-Degrees of Freedom Virtual Reality}},
      booktitle = {{Audio Engineering Society 145th Convention}},
      year      = {2018},
      url       = {http://www.aes.org/e-lib/browse.cfm?elib=19729},
    }

  • J. Rees-Jones, “The impact of multichannel game audio on the quality of player experience and in-game performance,” PhD Thesis, 2018.
    [BibTeX] [Download PDF]
    @phdthesis{Rees-Jones2018,
      author = {Rees-Jones, Joe},
      title  = {The Impact of Multichannel Game Audio on the Quality of Player Experience and In-game Performance},
      school = {Department of Electronic Engineering, University of York},
      year   = {2018},
      url    = {http://etheses.whiterose.ac.uk/23687/},
    }

  • J. Rees-Jones and D. T. Murphy, “The impact of multichannel game audio on the quality and enjoyment of player experience,” in Emotion in video game soundtracking, Springer, 2018.
    [BibTeX] [Download PDF]
    @incollection{Rees-Jones2018a,
      author    = {Rees-Jones, Joe and Murphy, Damian T.},
      title     = {The Impact of Multichannel Game Audio on the Quality and Enjoyment of Player Experience},
      booktitle = {Emotion in Video Game Soundtracking},
      series    = {International Series on Computer Entertainment and Media Technology},
      publisher = {Springer},
      year      = {2018},
      url       = {https://link.springer.com/chapter/10.1007/978-3-319-72272-6_11},
    }

  • C. Rougier, “Influence of Crossover Frequency on a Hybrid Acoustic Model for Room Impulse Response Synthesis,” Master Thesis, 2018.
    [BibTeX] [Download PDF]
    @mastersthesis{Rougier2018,
      author = {Rougier, Charlotte},
      title  = {{Influence of Crossover Frequency on a Hybrid Acoustic Model for Room Impulse Response Synthesis}},
      school = {Department of Electronic Engineering, University of York},
      year   = {2018},
      url    = {http://etheses.whiterose.ac.uk/20566/},
    }

  • R. Rudnicki and J. Brereton, “Sonicules – Designing drugs with sound: approaches to sound design for film, audiovisual performance and interactive sonification,” in Soundings: Documentary Film and the Listening Experience, G. Wall, Ed., University of Huddersfield, 2018.
    [BibTeX]
    @incollection{Rudnicki2018,
      author    = {Rudnicki, R. and Brereton, J.},
      title     = {{Sonicules - Designing drugs with sound: approaches to sound design for film, audiovisual performance and interactive sonification}},
      booktitle = {{Soundings: {D}ocumentary Film and the Listening Experience}},
      editor    = {Wall, Geoffrey},
      publisher = {University of Huddersfield},
      year      = {2018},
    }

  • T. Rudzki, D. Murphy, and G. Kearney, “A DAW-Based Interactive Tool for Perceptual Spatial Audio Evaluation,” in Audio Engineering Society 145th Convention, 2018.
    [BibTeX] [Download PDF]
    @InProceedings{rudzki2018a,
    author = {Rudzki, Tomasz and Murphy, Damian and Kearney, Gavin},
    title = {{A DAW-Based Interactive Tool for Perceptual Spatial Audio Evaluation}},
    booktitle = {{Audio Engineering Society 145th Convention}},
    year = {2018},
    month = oct,
    url = {http://www.aes.org/e-lib/browse.cfm?elib=19730},
    }

  • F. Stevens, D. T. Murphy, and S. L. Smith, “Soundscape auralisation and visualisation: A cross-modal approach to soundscape evaluation,” in DAFx 2018, Aveiro, Portugal, 2018.
    [BibTeX] [Download PDF]
    @inproceedings{Stevens2018,
      author    = {Stevens, F. and Murphy, D. T. and Smith, S. L.},
      title     = {Soundscape auralisation and visualisation: {A} cross-modal approach to soundscape evaluation},
      booktitle = {{DAFx 2018}},
      year      = {2018},
      month     = sep,
      address   = {Aveiro, Portugal},
      url       = {http://dafx2018.web.ua.pt/papers/DAFx2018_paper_3.pdf},
    }

  • F. Stevens, “Strategies for Environmental Sound Measurement, Modelling, and Evaluation,” PhD Thesis, 2018.
    [BibTeX] [Download PDF]
    @phdthesis{stevens2018strategies,
      author = {Stevens, Francis},
      title  = {{Strategies for Environmental Sound Measurement, Modelling, and Evaluation}},
      school = {Department of Electronic Engineering, University of York},
      year   = {2018},
      url    = {http://etheses.whiterose.ac.uk/22661/},
    }

  • S. Ternström, S. D’Amario, and A. Selamtzis, “Effects of the Lung Volume on the Electroglottographic Waveform in Trained Female Singers,” Journal of Voice, 2018. doi:10.1016/j.jvoice.2018.09.006
    [BibTeX] [Download PDF]
    @Article{Ternstrom2018,
    author = {Ternstr{\"o}m, S. and D'Amario, S. and Selamtzis, A.},
    title = {{Effects of the Lung Volume on the Electroglottographic Waveform in Trained Female Singers}},
    journal = {{Journal of Voice}},
    year = {2018},
    doi = {10.1016/j.jvoice.2018.09.006},
    url = {https://doi.org/10.1016/j.jvoice.2018.09.006},
    }

  • B. Tsui and G. Kearney, “A head-related transfer function database consolidation tool for high variance machine learning algorithms,” in Audio Engineering Society 145th Convention, 2018.
    [BibTeX] [Download PDF]
    @InProceedings{Tsui2018,
    author = {Tsui, Benjamin and Kearney, Gavin},
    title = {A head-related transfer function database consolidation tool for high variance machine learning algorithms},
    booktitle = {{Audio Engineering Society 145th Convention}},
    year = {2018},
    month = oct,
    url = {http://www.aes.org/e-lib/browse.cfm?elib=19716},
    }

  • R. R. Vos, D. T. Murphy, D. M. Howard, and H. Daffern, “Determining the relevant criteria for three-dimensional vocal tract characterization,” Journal of Voice, vol. 32, iss. 2, p. 130–142, 2018.
    [BibTeX]
    @Article{Vos2018,
    author = {R. R. Vos and D. T. Murphy and D. M. Howard and H. Daffern},
    title = {Determining the relevant criteria for three-dimensional vocal tract characterization},
    journal = {{Journal of Voice}},
    year = {2018},
    volume = {32},
    number = {2},
    pages = {130--142},
    publisher = {Elsevier},
    }

  • R. Vos, “Resonance tuning in professional operatic sopranos,” PhD Thesis, 2018.
    [BibTeX] [Download PDF]
    @PhdThesis{Vos2018a,
    author = {Rebecca Vos},
    title = {Resonance Tuning in Professional Operatic Sopranos},
    school = {Department of Electronic Engineering, University of York},
    year = {2018},
    url = {http://etheses.whiterose.ac.uk/23154/},
    }

  • R. R. Vos, D. T. Murphy, D. M. Howard, and H. Daffern, “The perception of formant tuning in soprano voices,” Journal of Voice, vol. 32, iss. 1, p. 126.e1–126.e7, 2018.
    [BibTeX]
    @Article{Vos2018b,
    author = {R. R. Vos and D. T. Murphy and D. M. Howard and H. Daffern},
    title = {The Perception of Formant Tuning in Soprano Voices},
    journal = {{Journal of Voice}},
    year = {2018},
    volume = {32},
    number = {1},
    pages = {126.e1--126.e7},
    publisher = {Elsevier},
    internal-note = {pages was garbled as 126--e1; restored to Elsevier e-page form matching sibling J. Voice entries -- verify against the published article},
    }

  • K. Young, G. Kearney, and A. I. Tew, “Acoustic validation of a BEM-suitable mesh model of KEMAR,” in Audio Engineering Society International Conference on Spatial Reproduction: Aesthetics and Science, 2018.
    [BibTeX] [Download PDF]
    @InProceedings{Young2018,
    author = {K. Young and G. Kearney and A. I. Tew},
    title = {Acoustic validation of a {BEM}-suitable mesh model of {KEMAR}},
    booktitle = {{Audio Engineering Society International Conference on Spatial Reproduction: Aesthetics and Science}},
    year = {2018},
    publisher = {Audio Engineering Society},
    day = {30},
    url = {http://www.aes.org/e-lib/browse.cfm?elib=19662},
    }

  • K. Young, G. Kearney, and A. I. Tew, “Loudspeaker positions with sufficient natural channel separation for binaural reproduction,” in Audio Engineering Society International Conference on Spatial Reproduction: Aesthetics and Science, 2018.
    [BibTeX] [Download PDF]
    @InProceedings{Young2018a,
    author = {K. Young and G. Kearney and A. I. Tew},
    title = {Loudspeaker positions with sufficient natural channel separation for binaural reproduction},
    booktitle = {{Audio Engineering Society International Conference on Spatial Reproduction: Aesthetics and Science}},
    year = {2018},
    month = jul,
    publisher = {Audio Engineering Society},
    day = {30},
    url = {http://www.aes.org/e-lib/browse.cfm?elib=19649},
    }

  • K. Young, M. Lovedee-Turner, J. Brereton, and H. Daffern, “The Impact of Gender on Conference Authorship in Audio Engineering : Analysis Using a New Data Collection Method,” IEEE Transactions on Education, vol. 61, iss. 4, p. 328 – 335, 2018. doi:10.1109/TE.2018.2814613
    [BibTeX]
    @Article{Young2018b,
    author = {K. Young and M. {Lovedee-Turner} and J. Brereton and H. Daffern},
    title = {{The Impact of Gender on Conference Authorship in Audio Engineering: Analysis Using a New Data Collection Method}},
    journal = {{IEEE Transactions on Education}},
    year = {2018},
    volume = {61},
    number = {4},
    pages = {328--335},
    doi = {10.1109/TE.2018.2814613},
    }

2017

  • C. Armstrong, A. Chadwick, L. Thresh, D. T. Murphy, and G. Kearney, “Simultaneous HRTF Measurement of Multiple Source Configurations Utilizing Semi-Permanent Structural Mounts,” in Audio engineering society 143rd convention, 2017.
    [BibTeX] [Download PDF]
    @InProceedings{Armstrong2017,
    author = {C. Armstrong and A. Chadwick and L. Thresh and D. T. Murphy and G. Kearney},
    title = {{Simultaneous HRTF Measurement of Multiple Source Configurations Utilizing Semi-Permanent Structural Mounts}},
    booktitle = {Audio Engineering Society 143rd Convention},
    year = {2017},
    url = {http://www.aes.org/e-lib/browse.cfm?elib=19311},
    }

  • B. Baruah, T. Ward, and J. Brereton, “An e-learning tool for reflective practice and enhancing employability among engineering students,” in European association for education in electrical and information engineering (EAEEIE) annual conference, 2017.
    [BibTeX]
    @InProceedings{Baruah2017,
    author = {Baruah, Bidyut and Ward, Tony and Brereton, Jude},
    title = {An e-learning tool for reflective practice and enhancing employability among engineering students},
    booktitle = {European Association for Education in Electrical and Information Engineering ({EAEEIE}) Annual Conference},
    year = {2017},
    address = {Grenoble, France},
    }

  • J. Brereton, “Music perception and performance in virtual acoustic spaces,” in Body, sound and space in music and beyond: multimodal explorations, C. Wöllner, Ed., Routledge, 2017.
    [BibTeX]
    @InCollection{Brereton2017,
    author = {Brereton, J.},
    title = {Music perception and performance in virtual acoustic spaces},
    booktitle = {Body, Sound and Space in Music and Beyond: Multimodal Explorations},
    publisher = {Routledge},
    year = {2017},
    editor = {W{\"o}llner, Clemens},
    series = {SEMPRE Studies in The Psychology of Music},
    }

  • K. Brown, M. Paradis, and D. T. Murphy, “OpenAirLib: A JavaScript Library for the Acoustics of Spaces,” in Audio Engineering Society 142nd Convention, 2017.
    [BibTeX] [Download PDF]
    @InProceedings{Brown2017,
    author = {K. Brown and M. Paradis and D. T. Murphy},
    title = {{Open{A}ir{L}ib: {A} {J}ava{S}cript Library for the Acoustics of Spaces}},
    booktitle = {{Audio Engineering Society 142nd Convention}},
    year = {2017},
    organization = {Audio Engineering Society},
    url = {http://www.aes.org/e-lib/browse.cfm?elib=18586},
    }

  • J. G. Burton, D. T. Murphy, and J. S. Brereton, “Perception of Low Frequency Content of Amplified Music in Arenas and Open-Air Music Festivals,” in Audio Engineering Society International Conference on Sound Reinforcement–Open Air Venues, 2017.
    [BibTeX] [Download PDF]
    @InProceedings{Burton2017-on,
    author = {Burton, Jonathan G. and Murphy, Damian T. and Brereton, Jude S.},
    title = {{Perception of Low Frequency Content of Amplified Music in Arenas and {Open-Air} Music Festivals}},
    booktitle = {{Audio Engineering Society International Conference on Sound {Reinforcement--Open} Air Venues}},
    year = {2017},
    publisher = {Audio Engineering Society},
    institution = {Audio Engineering Society},
    language = {en},
    url = {http://www.aes.org/e-lib/browse.cfm?elib=19179},
    }

  • S. D’Amario and H. Daffern, “Using electrolaryngography and electroglottography to assess the singing voice: a systematic review.,” Psychomusicology: music, mind, and brain, vol. 27, iss. 4, p. 229, 2017. doi:10.1037/pmu0000184
    [BibTeX]
    @Article{d2017using,
    author = {S. D'Amario and H. Daffern},
    title = {Using electrolaryngography and electroglottography to assess the singing voice: A systematic review.},
    journal = {Psychomusicology: Music, Mind, and Brain},
    year = {2017},
    volume = {27},
    number = {4},
    pages = {229},
    doi = {10.1037/pmu0000184},
    publisher = {Educational Publishing Foundation},
    }

  • H. Daffern, “Blend in singing ensemble performance: vibrato production in a vocal quartet,” Journal of voice, vol. 31, iss. 3, p. 385.e23 – 385.e29, 2017. doi:https://doi.org/10.1016/j.jvoice.2016.09.007
    [BibTeX] [Download PDF]
    @Article{daffern2017,
    author = {H. Daffern},
    title = {Blend in Singing Ensemble Performance: Vibrato Production in a Vocal Quartet},
    journal = {Journal of Voice},
    year = {2017},
    volume = {31},
    number = {3},
    pages = {385.e23--385.e29},
    issn = {0892-1997},
    doi = {10.1016/j.jvoice.2016.09.007},
    url = {http://www.sciencedirect.com/science/article/pii/S0892199716302156},
    }

  • M. C. Green and D. T. Murphy, “Acoustic scene classification using spatial features,” Detection and classification of acoustic scenes and events (dcase), p. 16–17, 2017.
    [BibTeX] [Download PDF]
    @Article{Green2017,
    author = {M. C. Green and D. T. Murphy},
    title = {Acoustic Scene Classification Using Spatial Features},
    journal = {Detection and Classification of Acoustic Scenes and Events (DCASE)},
    year = {2017},
    pages = {16--17},
    url = {http://www.cs.tut.fi/sgn/arg/dcase2017/documents/workshop_papers/DCASE2017Workshop_Green_126.pdf},
    }

  • M. C. Green and D. T. Murphy, “Eigenscape: A database of spatial acoustic scene recordings,” Applied Sciences, vol. 7, iss. 11, p. 1204, 2017. doi:10.3390/app7111204
    [BibTeX]
    @Article{Green2017a,
    author = {M. C. Green and D. T. Murphy},
    title = {EigenScape: {A} Database of Spatial Acoustic Scene Recordings},
    journal = {{Applied Sciences}},
    year = {2017},
    volume = {7},
    number = {11},
    pages = {1204},
    doi = {10.3390/app7111204},
    publisher = {Multidisciplinary Digital Publishing Institute},
    }

  • A. Gully, “Diphthong synthesis using the three-dimensional dynamic digital waveguide mesh,” PhD Thesis, 2017.
    [BibTeX] [Download PDF]
    @PhdThesis{Gully2017,
    author = {Amelia Gully},
    title = {Diphthong Synthesis using the Three-Dimensional Dynamic Digital Waveguide Mesh},
    school = {Department of Electronic Engineering, University of York},
    year = {2017},
    url = {http://etheses.whiterose.ac.uk/20043/},
    }

  • T. McKenzie, D. Murphy, and G. Kearney, “Assessing the Authenticity of the KEMAR Mouth Simulator as a Repeatable Speech Source,” in Audio engineering society 143rd convention, New York, 2017.
    [BibTeX] [Download PDF]
    @InProceedings{McKenzie2017,
    author = {McKenzie, Thomas and Murphy, Damian and Kearney, Gavin},
    title = {{Assessing the Authenticity of the KEMAR Mouth Simulator as a Repeatable Speech Source}},
    booktitle = {Audio Engineering Society 143rd Convention},
    year = {2017},
    address = {New York},
    url = {http://www.aes.org/e-lib/browse.cfm?elib=19217},
    }

  • T. Mckenzie, G. Kearney, and D. Murphy, “Diffuse-Field Equalisation of First Order Ambisonics,” in Dafx 2017, Edinburgh, 2017.
    [BibTeX] [Download PDF]
    @InProceedings{Mckenzie2017,
    author = {Mckenzie, Thomas and Kearney, Gavin and Murphy, Damian},
    title = {{Diffuse-Field Equalisation of First Order Ambisonics}},
    booktitle = {DAFx 2017},
    year = {2017},
    address = {Edinburgh},
    url = {http://www.dafx17.eca.ed.ac.uk/papers/DAFx17_paper_31.pdf},
    }

  • D. T. Murphy, S. Shelley, A. Foteinou, J. Brereton, and H. Daffern, “Acoustic heritage and audio creativity: the creative application of sound in the representation, understanding and experience of past environments,” Internet archaeology, 2017.
    [BibTeX]
    @article{Murphy2017,
    title={Acoustic Heritage and Audio Creativity: the Creative Application of Sound in the Representation, Understanding and Experience of Past Environments},
    author={D. T. Murphy and S. Shelley and A. Foteinou and J. Brereton and H. Daffern},
    journal={Internet Archaeology},
    year={2017},
    publisher={Council for British Archaeology}
    }

  • J. Rees-Jones and D. T. Murphy, “Spatial quality and user preference of headphone based multichannel audio rendering systems for video games: a pilot study,” in Audio engineering society 142nd convention, 2017.
    [BibTeX] [Download PDF]
    @InProceedings{Rees-Jones2017,
    author = {Rees-Jones, Joe and Murphy, Damian T.},
    title = {Spatial Quality and User Preference of Headphone Based Multichannel Audio Rendering Systems for Video Games: A Pilot Study},
    booktitle = {Audio Engineering Society 142nd Convention},
    year = {2017},
    url = {http://www.aes.org/e-lib/browse.cfm?elib=18648},
    }

  • H. Riaz, M. Stiles, C. Armstrong, H. Lee, and G. Kearney, “Multichannel Microphone Array Recording for Popular Music Production in Virtual Reality,” in Audio engineering society 143rd convention, 2017.
    [BibTeX] [Download PDF]
    @InProceedings{Riaz2017,
    author = {Riaz, Hashim and Stiles, Mirek and Armstrong, Cal and Lee, Hyunkook and Kearney, Gavin},
    title = {{Multichannel Microphone Array Recording for Popular Music Production in Virtual Reality}},
    booktitle = {Audio Engineering Society 143rd Convention},
    year = {2017},
    url = {http://www.aes.org/e-lib/browse.cfm?elib=19333},
    }

  • A. Southern, F. Stevens, and D. T. Murphy, “Sounding out smart cities: Auralization and soundscape monitoring for environmental sound design,” The journal of the acoustical society of america, vol. 141, iss. 5, p. 3880, 2017. doi:10.1121/1.4988686
    [BibTeX]
    @Article{Southern2017,
    author = {A. Southern and F. Stevens and D. T. Murphy},
    title = {Sounding out smart cities: {A}uralization and soundscape monitoring for environmental sound design},
    journal = {The Journal of the Acoustical Society of America},
    year = {2017},
    volume = {141},
    number = {5},
    pages = {3880},
    doi = {10.1121/1.4988686},
    }

  • F. Stevens, D. T. Murphy, and S. L. Smith, “Ecological validity of stereo UHJ soundscape reproduction,” in Audio engineering society 142nd convention, Berlin, Germany, 2017.
    [BibTeX] [Download PDF]
    @InProceedings{Stevens2017,
    author = {F. Stevens and D. T. Murphy and S. L. Smith},
    title = {Ecological validity of stereo {UHJ} soundscape reproduction},
    booktitle = {Audio Engineering Society 142nd Convention},
    year = {2017},
    address = {Berlin, Germany},
    url = {http://www.aes.org/e-lib/browse.cfm?elib=18641},
    }

  • F. Stevens, D. T. Murphy, L. Savioja, and V. Välimäki, “Modeling sparsely reflecting outdoor acoustic scenes using the waveguide web,” IEEE/ACM Transactions on Audio, Speech and Language Processing (TASLP), vol. 25, iss. 8, p. 1566–1578, 2017. doi:10.1109/TASLP.2017.2699424
    [BibTeX]
    @Article{Stevens2017a,
    author = {Stevens, F. and Murphy, D. T. and Savioja, L. and V{\"a}lim{\"a}ki, V.},
    title = {{Modeling sparsely reflecting outdoor acoustic scenes using the waveguide web}},
    journal = {{IEEE/ACM Transactions on Audio, Speech and Language Processing (TASLP)}},
    year = {2017},
    volume = {25},
    number = {8},
    pages = {1566--1578},
    doi = {10.1109/TASLP.2017.2699424},
    }

  • F. Stevens, D. T. Murphy, and S. L. Smith, “Soundscape categorisation and the self-assessment manikin,” in Dafx 2017, Edinburgh, UK, 2017.
    [BibTeX] [Download PDF]
    @InProceedings{Stevens2017b,
    author = {Stevens, F. and Murphy, D. T. and Smith, S. L.},
    title = {Soundscape categorisation and the self-assessment manikin},
    booktitle = {DAFx 2017},
    year = {2017},
    address = {Edinburgh, UK},
    url = {http://www.dafx17.eca.ed.ac.uk/papers/DAFx17_paper_7.pdf},
    }

  • L. Thresh, C. Armstrong, and G. Kearney, “A direct comparison of localization performance when using first, third, and fifth ambisonics order for real loudspeaker and virtual loudspeaker rendering,” in Audio engineering society 143rd convention, 2017.
    [BibTeX] [Download PDF]
    @InProceedings{Thresh2017,
    author = {Thresh, Lewis and Armstrong, Cal and Kearney, Gavin},
    title = {A direct comparison of localization performance when using first, third, and fifth ambisonics order for real loudspeaker and virtual loudspeaker rendering},
    booktitle = {Audio Engineering Society 143rd Convention},
    year = {2017},
    month = oct,
    url = {http://www.aes.org/e-lib/browse.cfm?elib=19261},
    }

  • R. R. Vos, H. Daffern, and D. M. Howard, “Resonance tuning in three girl choristers,” Journal of voice, vol. 31, iss. 1, p. 122.e1 – 122.e7, 2017. doi:https://doi.org/10.1016/j.jvoice.2016.01.013
    [BibTeX] [Download PDF]
    @Article{Vos2017,
    author = {R. R. Vos and H. Daffern and D. M. Howard},
    title = {Resonance Tuning in Three Girl Choristers},
    journal = {Journal of Voice},
    year = {2017},
    volume = {31},
    number = {1},
    pages = {122.e1--122.e7},
    issn = {0892-1997},
    doi = {10.1016/j.jvoice.2016.01.013},
    url = {http://www.sciencedirect.com/science/article/pii/S0892199716000291},
    }

  • R. Zolfaghari, N. Epain, C. Jin, J. Glaunés, and A. I. Tew, “Kernal principal component analysis of the ear morphology,” in Ieee international conference on acoustics, speech, and signal processing (icassp), 2017. doi:10.1109/ICASSP.2017.7952202
    [BibTeX] [Abstract]

    This paper describes features in the ear shape that change across a population of ears and explores the corresponding changes in ear acoustics. The statistical analysis conducted over the space of ear shapes uses a kernel principal component analysis (KPCA). Further, it utilizes the framework of large deformation diffeomorphic metric mapping and the vector space that is constructed over the space of initial momentums, which describes the diffeomorphic transformations from the reference template ear shape. The population of ear shapes examined by the KPCA are 124 left and right ear shapes from the SYMARE database that were rigidly aligned to the template (population average) ear. In the work presented here we show the morphological variations captured by the first two kernel principal components, and also show the acoustic transfer functions of the ears which are computed using fast multipole boundary element method simulations.

    @InProceedings{Zolfaghari2017,
    author = {R. Zolfaghari and N. Epain and C. Jin and J. Glaun{\'e}s and A. I. Tew},
    title = {Kernal principal component analysis of the ear morphology},
    booktitle = {IEEE International Conference on Acoustics, Speech, and Signal Processing (ICASSP)},
    year = {2017},
    month = mar,
    publisher = {IEEE},
    abstract = {This paper describes features in the ear shape that change across a population of ears and explores the corresponding changes in ear acoustics. The statistical analysis conducted over the space of ear shapes uses a kernel principal component analysis (KPCA). Further, it utilizes the framework of large deformation diffeomorphic metric mapping and the vector space that is constructed over the space of initial momentums, which describes the diffeomorphic transformations from the reference template ear shape. The population of ear shapes examined by the KPCA are 124 left and right ear shapes from the SYMARE database that were rigidly aligned to the template (population average) ear. In the work presented here we show the morphological variations captured by the first two kernel principal components, and also show the acoustic transfer functions of the ears which are computed using fast multipole boundary element method simulations.},
    day = {5},
    doi = {10.1109/ICASSP.2017.7952202},
    issn = {2379-190X},
    keywords = {Morphoacoustics, LDDMM, Kernel principal Component analysis, Ear shape analysis, FM-BEM},
    }

2016

  • C. Armstrong and J. Brereton, “The Application of Flexilink in Multi-User Virtual Acoustic Environments,” in Interactive audio systems symposium, 2016.
    [BibTeX] [Download PDF]
    @InProceedings{Armstrong2016,
    author = {Armstrong, Cal and Brereton, Jude},
    title = {{The Application of Flexilink in Multi-User Virtual Acoustic Environments}},
    booktitle = {Interactive Audio Systems Symposium},
    year = {2016},
    url = {https://www.york.ac.uk/sadie-project//IASS2016/IASS{\_}Papers/IASS{\_}2016{\_}paper{\_}20.pdf},
    }

  • C. Armstrong and J. Brereton, “A Filter Based Approach to Sound Source Simulation Through an Outward Facing Spherical Array of Loudspeakers 2 Related Work Directivity Measurements Definitions,” in Interactive audio systems symposium, 2016.
    [BibTeX] [Download PDF]
    @InProceedings{Armstrong2016a,
    author = {Armstrong, Cal and Brereton, Jude},
    title = {{A Filter Based Approach to Sound Source Simulation Through an Outward Facing Spherical Array of Loudspeakers 2 Related Work Directivity Measurements Definitions}},
    booktitle = {Interactive Audio Systems Symposium},
    year = {2016},
    url = {https://www.york.ac.uk/sadie-project//IASS2016/IASS{\_}Papers/IASS{\_}2016{\_}paper{\_}21.pdf},
    }

  • J. Brereton, “Making learning authentic: real-world assessments for masters level study,” Forum, iss. 40, p. 14–15, 2016.
    [BibTeX]
    @Article{Brereton2016-os,
    author = {Brereton, J.},
    title = {Making learning authentic: real-world assessments for masters level study},
    journal = {Forum},
    year = {2016},
    number = {40},
    pages = {14--15},
    }

  • J. Burton, “Perception of low frequency content of amplified music in music arenas and open-air music festivals,” Master Thesis, 2016.
    [BibTeX] [Download PDF]
    @MastersThesis{Burton2016,
    author = {Jon Burton},
    title = {Perception of Low Frequency Content of Amplified Music in Music Arenas and Open-air Music Festivals},
    school = {Department of Electronic Engineering, University of York},
    year = {2016},
    url = {http://etheses.whiterose.ac.uk/16944/},
    }

  • N. Cong, “A multi-dimensional analytical model for musical harmony perception,” PhD Thesis, 2016.
    [BibTeX] [Download PDF]
    @PhdThesis{Cong2016,
    author = {Ning Cong},
    title = {A Multi-Dimensional Analytical Model for Musical Harmony Perception},
    school = {Department of Electronic Engineering, University of York},
    year = {2016},
    url = {http://etheses.whiterose.ac.uk/17330/},
    }

  • A. Delgado Castro and J. E. Szymanski, “Improved pitch trajectory estimation for polyphonic single-channel audio mixtures,” in DMRN+11: Digital Music Research Network One-day Workshop, 2016.
    [BibTeX] [Download PDF]
    @InProceedings{DelgadoCastro2016,
    author = {Delgado Castro, Alejandro and Szymanski, John E.},
    title = {Improved pitch trajectory estimation for polyphonic single-channel audio mixtures},
    booktitle = {{DMRN+11: Digital Music Research Network One-day Workshop}},
    year = {2016},
    url = {https://qmro.qmul.ac.uk/xmlui/bitstream/handle/123456789/19345/Kudumakis%20DMRN+11%3A%20Digital%20Music%202016%20Published.pdf?sequence=1},
    }

  • A. Delgado Castro and J. E. Szymanski, “Multipitch estimation applied to single-channel audio source separation: relevant techniques and challenges,” in York Doctoral Symposium 2016, 2016.
    [BibTeX] [Download PDF]
    @InProceedings{DelgadoCastro2016a,
    author = {Alejandro {Delgado Castro} and John E. Szymanski},
    title = {Multipitch Estimation Applied to Single-Channel Audio Source Separation: Relevant Techniques and Challenges},
    booktitle = {{York Doctoral Symposium 2016}},
    year = {2016},
    url = {https://www.cs.york.ac.uk/yds/yds2016/proceedings/castro2016multipitch.pdf},
    }

  • J. Gao, “The use of optimal cue mapping to improve the intelligibility and quality of speech in complex binaural sound mixtures,” PhD Thesis, 2016.
    [BibTeX] [Download PDF]
    @PhdThesis{Gao2016,
    author = {Jingbo Gao},
    title = {The Use of Optimal Cue Mapping to Improve the Intelligibility and Quality of Speech in Complex Binaural Sound Mixtures},
    school = {Department of Electronic Engineering, University of York},
    year = {2016},
    url = {http://etheses.whiterose.ac.uk/13174/},
    }

  • M. C. Green, J. Szymanski, and M. Speed, “Assessing the suitability of the magnitude slope deviation detection criterion for use in automatic acoustic feedback control,” in Dafx 2016, 2016, pp. 85-92.
    [BibTeX] [Download PDF]
    @InProceedings{Green2016,
    author = {M. C. Green and J. Szymanski and M. Speed},
    title = {Assessing the Suitability of the Magnitude Slope Deviation Detection Criterion for use in Automatic Acoustic Feedback Control},
    booktitle = {DAFx 2016},
    year = {2016},
    pages = {85--92},
    month = sep,
    url = {http://dafx16.vutbr.cz/dafxpapers/12-DAFx-16_paper_23-PN.pdf},
    }

  • A. Hinde, “Concurrency in auditory displays for connected television,” PhD Thesis, 2016.
    [BibTeX] [Download PDF]
    @PhdThesis{Hinde2016,
    author = {Alistair Hinde},
    title = {Concurrency in auditory displays for connected television},
    school = {Department of Electronic Engineering, University of York},
    year = {2016},
    url = {http://etheses.whiterose.ac.uk/16724/},
    }

  • S. Hughes and G. Kearney, “Auditory immersion of 5.1 virtualization within gameplay,” Millennium biltmore hotel, los angeles, ca, p. 18, 2016.
    [BibTeX]
    @Article{Hughes2016,
    author = {S. Hughes and G. Kearney},
    title = {Auditory immersion of 5.1 virtualization within gameplay},
    journal = {Millennium Biltmore Hotel, Los Angeles, CA},
    year = {2016},
    pages = {18},
    internal-note = {journal field holds a conference venue address, not a journal title -- looks like a mangled conference-paper export; verify the actual venue and retype as @InProceedings},
    }

  • S. Hughes and G. Kearney, “Moving virtual source perception in 2D space,” in Audio engineering society international conference on audio for virtual and augmented reality, 2016.
    [BibTeX] [Download PDF]
    @InProceedings{Hughes2016a,
    author = {S. Hughes and G. Kearney},
    title = {Moving virtual source perception in 2{D} space},
    booktitle = {Audio Engineering Society International Conference on Audio for Virtual and Augmented Reality},
    year = {2016},
    month = sep,
    url = {http://www.aes.org/e-lib/browse.cfm?elib=18492},
    }

  • A. Hunt, Managing your project: Achieving success with minimal stress, CreateSpace Independent Publishing Platform, 2016.
    [BibTeX] [Download PDF]
    @Book{Hunt2016,
    title = {Managing your project: {A}chieving success with minimal stress},
    publisher = {CreateSpace Independent Publishing Platform},
    year = {2016},
    author = {Hunt, A.},
    isbn = {9781537212203},
    url = {https://books.google.co.uk/books?id=gvs1vgAACAAJ},
    }

  • G. Kearney, “Auditory height perception in cross-talk cancellation using low order HRTF approximation,” in Reproduced sound, 2016.
    [BibTeX]
    @InProceedings{Kearney2016,
    author = {Kearney, Gavin},
    title = {Auditory height perception in cross-talk cancellation using low order {HRTF} approximation},
    booktitle = {Reproduced Sound},
    year = {2016},
    organization = {Institute of Acoustics},
    }

  • G. Kearney, “The perception of auditory height in individualised and non-individualized dynamic cross-talk cancellation,” in Audio engineering society international conference on sound field control, 2016.
    [BibTeX] [Download PDF]
    @Conference{Kearney2016a,
    author = {G. Kearney},
    title = {The perception of auditory height in individualised and non-individualized dynamic cross-talk cancellation},
    booktitle = {Audio Engineering Society International Conference on Sound Field Control},
    year = {2016},
    month = jul,
    url = {http://www.aes.org/e-lib/browse.cfm?elib=18323},
    }

  • G. Kearney, H. Daffern, L. Thresh, H. Omodudu, C. Armstrong, and J. Brereton, “Design of an interactive virtual reality system for ensemble singing,” in Interactive audio systems symposium, 2016.
    [BibTeX] [Download PDF]
    @InProceedings{Kearney2016b,
    author = {Kearney, Gavin and Daffern, Helena and Thresh, Lewis and Omodudu, Haroom and Armstrong, Calum and Brereton, Jude},
    title = {Design of an interactive virtual reality system for ensemble singing},
    booktitle = {Interactive Audio Systems Symposium},
    year = {2016},
    organization = {University of York},
    url = {https://www.york.ac.uk/sadie-project/IASS2016/IASS_Papers/IASS_2016_paper_1.pdf},
    }

  • G. C. Kearney, H. Daffern, L. Thresh, H. Omodudu, C. Armstrong, and J. S. Brereton, “Design of an interactive virtual reality system for ensemble singing,” in Interactive audio systems symposium, 2016.
    [BibTeX]
    @InProceedings{Kearney2016-ju,
    author = {Kearney, Gavin Cyril and Daffern, Helena and Thresh, Lewis and Omodudu, Haroom and Armstrong, Calum and Brereton, Judith Sara},
    title = {Design of an Interactive Virtual Reality System for Ensemble Singing},
    booktitle = {Interactive Audio Systems Symposium},
    year = {2016},
    language = {English},
    internal-note = {duplicate of Kearney2016b (same work under a different key) -- consolidate citations onto one key and retire the other},
    }

  • M. Lopez, G. Kearney, and K. Hofstädter, “Enhancing audio description, spatialisation and accessibility in film and television,” in Reproduced sound, 2016.
    [BibTeX]
    @InProceedings{Lopez2016,
    author = {M. Lopez and G. Kearney and K. Hofst{\"a}dter},
    title = {Enhancing audio description, spatialisation and accessibility in film and television},
    booktitle = {Reproduced Sound},
    year = {2016},
    volume = {38},
    number = {Pt 2},
    }

  • M. Lovedee-Turner, J. Brereton, and D. Murphy, “An Algorithmic Approach to the Manipulation of B-Format Impulse Responses for Sound Source Rotation,” in Audio engineering society 61st international conference: audio for games, 2016.
    [BibTeX] [Download PDF]
    @InProceedings{Lovedee-Turner2016-ut,
    author = {Lovedee-Turner, Michael and Brereton, Jude and Murphy, Damian},
    title = {{An Algorithmic Approach to the Manipulation of {B-Format} Impulse Responses for Sound Source Rotation}},
    booktitle = {Audio Engineering Society 61st International Conference: Audio for Games},
    year = {2016},
    url = {http://www.aes.org/e-lib/browse.cfm?elib=18096},
    }

  • S. Oxnard, “Efficient hybrid virtual room acoustic modelling,” PhD Thesis, 2016.
    [BibTeX] [Download PDF]
    @PhdThesis{Oxnard2016,
    author = {Stephen Oxnard},
    title = {Efficient Hybrid Virtual Room Acoustic Modelling},
    school = {Department of Electronic Engineering, University of York},
    year = {2016},
    url = {http://etheses.whiterose.ac.uk/17459/},
    }

  • C. Pike, F. Melchior, and A. I. Tew, “Descriptive analysis of binaural rendering with virtual loudspeakers using a rate-all-that-apply approach,” in Audio engineering society international conference on headphone technology, 2016.
    [BibTeX] [Abstract] [Download PDF]

    Spatial audio content for headphones is often created using binaural rendering of a virtual loudspeaker array. It is important to understand the effect of this choice on the sound quality. A sensory profiling evaluation was used to assess the perceived differences between direct binaural rendering and virtual loudspeaker rendering of a single sound source with and without head tracking and using anechoic and reverberant binaural impulse responses. A subset of the Spatial Audio Quality Inventory (SAQI) was used. Listeners first selected only attributes that they felt applied to the given stimuli. Initial analysis shows that tone colour and source direction are most affected by the use of this technique, but source extent, distance, and externalisation are also affected. Further work is required to analyse the sparse attribute rating data in depth.

    @inproceedings{Pike2016,
      author    = {Pike, C. and Melchior, F. and Tew, A. I.},
      title     = {Descriptive analysis of binaural rendering with virtual loudspeakers using a rate-all-that-apply approach},
      booktitle = {Audio Engineering Society International Conference on Headphone Technology},
      year      = {2016},
      month     = aug,
      day       = {19},
      abstract  = {Spatial audio content for headphones is often created using binaural rendering of a virtual loudspeaker array. It is important to understand the effect of this choice on the sound quality. A sensory profiling evaluation was used to assess the perceived differences between direct binaural rendering and virtual loudspeaker rendering of a single sound source with and without head tracking and using anechoic and reverberant binaural impulse responses. A subset of the Spatial Audio Quality Inventory (SAQI) was used. Listeners first selected only attributes that they felt applied to the given stimuli. Initial analysis shows that tone colour and source direction are most affected by the use of this technique, but source extent, distance, and externalisation are also affected. Further work is required to analyse the sparse attribute rating data in depth.},
      keywords  = {binaural, virtual auditory space, perceptual quality},
      url       = {http://www.aes.org/e-lib/browse.cfm?elib=18348},
    }

  • D. Robinson and G. Kearney, “Echolocation in virtual reality,” in Interactive audio systems symposium, 2016.
    [BibTeX] [Download PDF]
    @InProceedings{Robinson2016,
    author = {Robinson, Darren and Kearney, Gavin},
    title = {Echolocation in virtual reality},
    booktitle = {Interactive Audio Systems Symposium},
    year = {2016},
    organization = {University of York},
    url = {https://www.york.ac.uk/sadie-project/IASS2016/IASS_Papers/IASS_2016_paper_22.pdf},
    }

  • J. Sanderson and A. Hunt, “Using real-time sonification of heart rate data to provide a mobile based training aid for runners,” in Interactive audio systems symposium, 2016.
    [BibTeX] [Download PDF]
    @inproceedings{Sanderson2016,
      author    = {Sanderson, J. and Hunt, A.},
      title     = {Using real-time sonification of heart rate data to provide a mobile based training aid for runners},
      booktitle = {Interactive Audio Systems Symposium},
      year      = {2016},
      url       = {https://www.york.ac.uk/sadie-project/IASS2016/IASS_Papers/IASS_2016_paper_5.pdf},
    }

  • F. Stevens, D. T. Murphy, and S. L. Smith, “Emotion and soundscape preference rating: using semantic differential pairs and the self-assessment manikin,” in Sound and music computing, Hamburg, Germany, 2016.
    [BibTeX] [Download PDF]
    @inproceedings{Stevens2016,
      author    = {Stevens, F. and Murphy, D. T. and Smith, S. L.},
      title     = {Emotion and soundscape preference rating: using semantic differential pairs and the self-assessment manikin},
      booktitle = {Sound and Music Computing},
      year      = {2016},
      address   = {Hamburg, Germany},
      url       = {https://smc2016.hfmt-hamburg.de/wp-content/uploads/2016/09/SMC2016_proceedings.pdf},
    }

  • F. Stevens, D. T. Murphy, and S. L. Smith, “The Self-Assessment Manikin and heart rate: Responses to auralised soundscapes,” in Interactive audio systems symposium, York, UK, 2016.
    [BibTeX] [Download PDF]
    @inproceedings{Stevens2016a,
      author    = {Stevens, F. and Murphy, D. T. and Smith, S. L.},
      title     = {The {S}elf-{A}ssessment {M}anikin and heart rate: {R}esponses to auralised soundscapes},
      booktitle = {Interactive Audio Systems Symposium},
      year      = {2016},
      address   = {York, UK},
      url       = {https://www.york.ac.uk/sadie-project/IASS2016/IASS_Papers/IASS_2016_paper_3.pdf},
    }

  • J. van Mourik, “Higher-order finite difference time domain algorithms for room acoustic modelling,” PhD Thesis, 2016.
    [BibTeX] [Download PDF]
    @phdthesis{vanMourik2016,
      author = {{van Mourik}, Jelle},
      title  = {Higher-order Finite Difference Time Domain Algorithms for Room Acoustic Modelling},
      school = {Department of Electronic Engineering, University of York},
      year   = {2016},
      url    = {http://etheses.whiterose.ac.uk/15661/},
    }

  • K. Young, G. Kearney, and A. I. Tew, “Boundary element modelling of KEMAR for binaural rendering: Mesh production and validation,” in Interactive audio systems symposium, 2016.
    [BibTeX] [Abstract] [Download PDF]

    Head and torso simulators are used extensively within acoustic research, often in place of human subjects in time-consuming or repetitive experiments. Particularly common is the Knowles Electronics Manikin for Acoustic Research (KEMAR), which has the acoustic auditory properties of an average human head. As an alternative to physical acoustic measurements, the boundary element method (BEM) is widely used to calculate the propagation of sound using computational models of a scenario. Combining this technique with a compatible 3D surface mesh of KEMAR would allow for detailed binaural analysis of speaker distributions and decoder design – without the disadvantages associated with making physical measurements. This paper details the development and validation of a BEM-compatible mesh model of KEMAR, based on the original computer-aided design (CAD) file and valid up to 20 kHz. Use of the CAD file potentially allows a very close match to be achieved between the mesh and the physical manikin. The mesh is consistent with the original CAD description, both in terms of overall volume and of local topology, and the numerical requirements for BEM compatibility have been met. Computational limitations restrict usage of the mesh in its current state, so simulation accuracy cannot as yet be compared with acoustically measured HRTFs. Future work will address the production of meshes suitable for use in BEM with lower computational requirements, using the process validated in this work.

    @InProceedings{Young2016,
    author = {K. Young and G. Kearney and A. I. Tew},
    title = {Boundary element modelling of {KEMAR} for binaural rendering: {M}esh production and validation},
    booktitle = {Interactive Audio Systems Symposium},
    year = {2016},
    note = {Interactive Audio Systems Symposium, IASS ; Conference date: 23-09-2016 Through 23-09-2016},
    abstract = {Head and torso simulators are used extensively within acoustic research, often in place of human subjects in time-consuming or repetitive experiments. Particularly common is the Knowles Electronics Manikin for Acoustic Research (KEMAR), which has the acoustic auditory properties of an average human head. As an alternative to physical acoustic measurements, the boundary element method (BEM) is widely used to calculate the propagation of sound using computational models of a scenario. Combining this technique with a compatible 3D surface mesh of KEMAR would allow for detailed binaural analysis of speaker distributions and decoder design - without the disadvantages associated with making physical measurements.
    This paper details the development and validation of a BEM-compatible mesh model of KEMAR, based on the original computer-aided design (CAD) file and valid up to 20 kHz. Use of the CAD file potentially allows a very close match to be achieved between the mesh and the physical manikin. The mesh is consistent with the original CAD description, both in terms of overall volume and of local topology, and the numerical requirements for BEM compatibility have been met. Computational limitations restrict usage of the mesh in its current state, so simulation accuracy cannot as yet be compared with acoustically measured HRTFs. Future work will address the production of meshes suitable for use in BEM with lower computational requirements, using the process validated in this work.},
    day = {23},
    url = {https://www.york.ac.uk/sadie-project/IASS2016.html},
    }

  • D. Zantalis, “Guided matching pursuit and its application to sound source separation,” PhD Thesis, 2016.
    [BibTeX] [Download PDF]
    @phdthesis{Zantalis2016,
      author = {Zantalis, Dimitrios},
      title  = {Guided Matching Pursuit and its Application to Sound Source Separation},
      school = {Department of Electronic Engineering, University of York},
      year   = {2016},
      url    = {http://etheses.whiterose.ac.uk/13204/},
    }

  • R. Zolfaghari, N. Epain, C. Jin, J. Glaunés, and A. I. Tew, “Generating a morphable model of ears,” in Ieee international conference on acoustics, speech, and signal processing (icassp), 2016. doi:10.1109/ICASSP.2016.7471981
    [BibTeX] [Abstract]

    This paper describes the generation of a morphable model for external ear shapes. The aim for the morphable model is to characterize an ear shape using only a few parameters in order to assist the study of morphoacoustics. The model is derived from a statistical analysis of a population of 58 ears from the SYMARE database. It is based upon the framework of large deformation diffeomorphic metric mapping (LDDMM) and the vector space that is constructed over the space of initial momentums describing the diffeomorphic transformations. To develop a morphable model using the LDDMM framework, the initial momentums are analyzed using a kernel based principal component analysis. In this paper, we examine the ability of our morphable model to construct test ear shapes not included in the principal component analysis.

    @inproceedings{Zolfaghari2016,
      author    = {Zolfaghari, R. and Epain, N. and Jin, C. and Glaun{\'e}s, J. and Tew, A. I.},
      title     = {Generating a morphable model of ears},
      booktitle = {IEEE International Conference on Acoustics, Speech, and Signal Processing (ICASSP)},
      year      = {2016},
      month     = mar,
      day       = {16},
      publisher = {IEEE},
      note      = {2016 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP); Conference date: 20-03-2016 Through 25-03-2016},
      abstract  = {This paper describes the generation of a morphable model for external ear shapes. The aim for the morphable model is to characterize an ear shape using only a few parameters in order to assist the study of morphoacoustics. The model is derived from a statistical analysis of a population of 58 ears from the SYMARE database. It is based upon the framework of large deformation diffeomorphic metric mapping (LDDMM) and the vector space that is constructed over the space of initial momentums describing the diffeomorphic transformations. To develop a morphable model using the LDDMM framework, the initial momentums are analyzed using a kernel based principal component analysis. In this paper, we examine the ability of our morphable model to construct test ear shapes not included in the principal component analysis.},
      doi       = {10.1109/ICASSP.2016.7471981},
      issn      = {2379-190X},
    }

2015

  • E. Alon, “Analysis and synthesis of the handpan sound,” Master Thesis, 2015.
    [BibTeX] [Download PDF]
    @mastersthesis{Alon2015,
      author = {Alon, Eyal},
      title  = {Analysis and Synthesis of the Handpan Sound},
      school = {Department of Electronic Engineering, University of York},
      year   = {2015},
      url    = {http://etheses.whiterose.ac.uk/12260/},
    }

  • N. Degara, A. Hunt, and T. Hermann, “Interactive Sonification [Guest editors’ introduction],” Ieee multimedia, vol. 22, iss. 1, p. 20–23, 2015.
    [BibTeX]
    @article{Degara2015,
      author    = {Degara, Norberto and Hunt, Andy and Hermann, Thomas},
      title     = {Interactive {S}onification [{G}uest editors' introduction]},
      journal   = {IEEE MultiMedia},
      year      = {2015},
      volume    = {22},
      number    = {1},
      pages     = {20--23},
      publisher = {IEEE},
    }

  • J. Gao and A. I. Tew, “The segregation of spatialised speech in interference by optimal mapping of diverse cues,” in Ieee international conference on acoustics, speech, and signal processing (icassp), 2015, p. 2095–2099. doi:10.1109/ICASSP.2015.7178340
    [BibTeX] [Abstract]

    We describe optimal cue mapping (OCM), a potentially real-time binaural signal processing method for segregating a sound source in the presence of multiple interfering 3D sound sources. Spatial cues are extracted from a multisource binaural mixture and used to train artificial neural networks (ANNs) to estimate the spectral energy fraction of a wanted speech source in the mixture. Once trained, the ANN outputs form a spectral ratio mask which is applied frame-by-frame to the mixture to approximate the magnitude spectrum of the wanted speech. The speech intelligibility performance of the OCM algorithm for anechoic sound sources is evaluated on previously unseen speech mixtures using the STOI automated measures, and compared with an established reference method. The optimized integration of multiple cues offers clear performance benefits and the ability to quantify the relative importance of each cue will facilitate computationally efficient implementations.

    @inproceedings{Gao2015,
      author    = {Gao, J. and Tew, A. I.},
      title     = {The segregation of spatialised speech in interference by optimal mapping of diverse cues},
      booktitle = {IEEE International Conference on Acoustics, Speech, and Signal Processing (ICASSP)},
      year      = {2015},
      pages     = {2095--2099},
      month     = apr,
      day       = {19},
      publisher = {IEEE},
      abstract  = {We describe optimal cue mapping (OCM), a potentially real-time binaural signal processing method for segregating a sound source in the presence of multiple interfering 3D sound sources. Spatial cues are extracted from a multisource binaural mixture and used to train artificial neural networks (ANNs) to estimate the spectral energy fraction of a wanted speech source in the mixture. Once trained, the ANN outputs form a spectral ratio mask which is applied frame-by-frame to the mixture to approximate the magnitude spectrum of the wanted speech. The speech intelligibility performance of the OCM algorithm for anechoic sound sources is evaluated on previously unseen speech mixtures using the STOI automated measures, and compared with an established reference method. The optimized integration of multiple cues offers clear performance benefits and the ability to quantify the relative importance of each cue will facilitate computationally efficient implementations.},
      doi       = {10.1109/ICASSP.2015.7178340},
      isbn      = {978-1-4673-6997-8},
      keywords  = {Speech segregation, Artificial Neural Networks, ratio mask},
    }

  • A. F. Hinde, M. J. Evans, A. I. Tew, and D. M. Howard, “Onset asynchrony in spoken menus,” in International conference on auditory display (icad), 2015, p. 86–93.
    [BibTeX] [Abstract] [Download PDF]

    The menu is an important interface component, which appears unlikely to be completely superseded by modern search-based approaches. For someone who is unable to attend a screen visually, however, alternative non-visual menu formats are often problematic. A display is developed in which multiple concurrent words are presented with different amounts of onset asynchrony. The effect of different amounts of asynchrony and word length on task durations, accuracy and workload are explored. It is found that total task duration is significantly affected by both onset asynchrony and word duration. Error rates are significantly affected by both onset asynchrony, word length and their interaction, whilst subjective workload scores are only significantly affected by onset asynchrony. Overall, the results appear to suggest that the best compromise between accuracy, workload and speed may be achieved through presenting shorter or temporally-compressed words with a short inter-stimuli interval.

    @inproceedings{Hinde2015,
      author    = {Hinde, A. F. and Evans, M. J. and Tew, A. I. and Howard, D. M.},
      title     = {Onset asynchrony in spoken menus},
      booktitle = {International Conference on Auditory Display (ICAD)},
      year      = {2015},
      pages     = {86--93},
      month     = jul,
      day       = {8},
      note      = {This work is licensed under Creative Commons Attribution Non Commercial 4.0 International License.},
      abstract  = {The menu is an important interface component, which appears unlikely to be completely superseded by modern search-based approaches. For someone who is unable to attend a screen visually, however, alternative non-visual menu formats are often problematic. A display is developed in which multiple concurrent words are presented with different amounts of onset asynchrony. The effect of different amounts of asynchrony and word length on task durations, accuracy and workload are explored. It is found that total task duration is significantly affected by both onset asynchrony and word duration. Error rates are significantly affected by both onset asynchrony, word length and their interaction, whilst subjective workload scores are only significantly affected by onset asynchrony. Overall, the results appear to suggest that the best compromise between accuracy, workload and speed may be achieved through presenting shorter or temporally-compressed words with a short inter-stimuli interval.},
      isbn      = {978-3-902949-01-1},
      keywords  = {ONSET ASYNCHRONY, Menu navigation, Speech},
      url       = {http://hdl.handle.net/1853/54112},
    }

  • L. Hobden and A. I. Tew, “Investigating head-related transfer function spectral smoothing using a sagittal plane auditory localization model,” in Ieee workshop on applications of signal processing to audio and acoustics (waspaa), 2015. doi:10.1109/WASPAA.2015.7336955
    [BibTeX] [Abstract]

    A new head-related transfer function (HRTF) smoothing algorithm is presented. HRTF magnitude responses are expressed on an equivalent rectangular bandwidth frequency scale and smoothing is increased by progressively discarding the higher frequency Fourier coefficients. A sagittal plane localization model was used to assess the degree of spectral smoothing that can be applied without significant increase in localization error. The results of the localization model simulation were compared with results from a previous perceptual investigation using an algorithm that discards coefficients on a linear frequency scale. Our findings suggest that using a perceptually motivated frequency scale yields similar localization performance using fewer than half the number of coefficients.

    @inproceedings{Hobden2015,
      author    = {Hobden, L. and Tew, A. I.},
      title     = {Investigating head-related transfer function spectral smoothing using a sagittal plane auditory localization model},
      booktitle = {IEEE Workshop on Applications of Signal Processing to Audio and Acoustics (WASPAA)},
      year      = {2015},
      abstract  = {A new head-related transfer function (HRTF) smoothing algorithm is presented. HRTF magnitude responses are expressed on an equivalent rectangular bandwidth frequency scale and smoothing is increased by progressively discarding the higher frequency Fourier coefficients. A sagittal plane localization model was used to assess the degree of spectral smoothing that can be applied without significant increase in localization error. The results of the localization model simulation were compared with results from a previous perceptual investigation using an algorithm that discards coefficients on a linear frequency scale. Our findings suggest that using a perceptually motivated frequency scale yields similar localization performance using fewer than half the number of coefficients.},
      doi       = {10.1109/WASPAA.2015.7336955},
      keywords  = {head-related transfer function, spatial sound, spectral smoothing, auditory localization model},
    }

  • S. Hughes and G. Kearney, “Fear and localisation: Emotional fine-tuning utilising multiple source directions,” in Audio engineering society 56th international conference: audio for games, 2015.
    [BibTeX] [Download PDF]
    @Conference{Hughes2015,
    author = {S. Hughes and G. Kearney},
    title = {Fear and localisation: {E}motional fine-tuning utilising multiple source directions},
    booktitle = {Audio Engineering Society 56th International Conference: Audio for Games},
    year = {2015},
    month = feb,
    url = {http://www.aes.org/e-lib/browse.cfm?elib=17596},
    }

  • G. Kearney and T. Doyle, “A virtual loudspeaker database for ambisonics research,” in International conference on spatial audio (icsa), 2015.
    [BibTeX]
    @inproceedings{Kearney2015,
      author    = {Kearney, G. and Doyle, T.},
      title     = {A virtual loudspeaker database for ambisonics research},
      booktitle = {International Conference on Spatial Audio (ICSA)},
      year      = {2015},
      month     = sep,
      day       = {8},
      publisher = {Verband Deutscher Tonmeister e.V.},
    }

  • G. Kearney and T. Doyle, “On prediction of auditory height in ambisonics,” in Tagungsbericht icsa 2015:, 2015.
    [BibTeX]
    @InProceedings{Kearney2015a,
    author = {G. Kearney and T. Doyle},
    title = {On prediction of auditory height in ambisonics},
    booktitle = {International Conference on Spatial Audio (ICSA)},
    year = {2015},
    month = sep,
    publisher = {Verband Deutscher Tonmeister e.V.},
    day = {8},
    }

  • G. Kearney and T. Doyle, “An HRTF database for virtual loudspeaker rendering,” in Audio Engineering Society 139th Convention, 2015.
    [BibTeX] [Download PDF]
    @Conference{Kearney2015b,
    author = {Kearney, Gavin and Doyle, Tony},
    title = {An {HRTF} database for virtual loudspeaker rendering},
    booktitle = {Audio Engineering Society 139th Convention},
    year = {2015},
    month = oct,
    url = {http://www.aes.org/e-lib/browse.cfm?elib=17980},
    }

  • G. Kearney, X. Liu, A. Manns, and M. Gorzel, “Auditory distance perception with static and dynamic binaural rendering,” in Audio engineering society 57th international conference: the future of audio entertainment technology & cinema, television and the internet, 2015.
    [BibTeX] [Download PDF]
    @conference{Kearney2015c,
      author    = {Kearney, G. and Liu, X. and Manns, A. and Gorzel, M.},
      title     = {Auditory distance perception with static and dynamic binaural rendering},
      booktitle = {Audio Engineering Society 57th International Conference: The Future of Audio Entertainment Technology \& Cinema, Television and the Internet},
      year      = {2015},
      month     = mar,
      url       = {http://www.aes.org/e-lib/browse.cfm?elib=17603},
    }

  • G. Kearney and T. Doyle, “Height perception in ambisonic based binaural decoding,” in Audio Engineering Society 139th Convention, 2015.
    [BibTeX] [Download PDF]
    @Conference{Kearney2015d,
    author = {G. Kearney and T. Doyle},
    title = {Height perception in ambisonic based binaural decoding},
    booktitle = {Audio Engineering Society 139th Convention},
    year = {2015},
    month = oct,
    url = {http://www.aes.org/e-lib/browse.cfm?elib=17979},
    }

  • R. McIlraith, P. Walton, and J. Brereton, “The spatialised sonification of Drug-Enzyme interactions,” in International conference on auditory display (ICAD), Graz, Austria, 2015, p. 323–324.
    [BibTeX]
    @inproceedings{McIlraith2015-uz,
      author      = {McIlraith, Rick and Walton, Paul and Brereton, Jude},
      title       = {The Spatialised Sonification of {Drug-Enzyme} Interactions},
      booktitle   = {International Conference on Auditory Display ({ICAD})},
      year        = {2015},
      pages       = {323--324},
      address     = {Graz, Austria},
      conference  = {ICAD 2015},
      institution = {Georgia Institute of Technology},
    }

  • J. W. Newbold, A. Hunt, and J. Brereton, “Chemical spectral analysis through sonification,” in International conference on auditory display (icad), 2015.
    [BibTeX]
    @InProceedings{Newbold2015-gh,
    author = {Newbold, Joseph W. and Hunt, Andy and Brereton, Jude},
    title = {Chemical spectral analysis through sonification},
    booktitle = {International Conference on Auditory Display (ICAD)},
    year = {2015},
    conference = {ICAD 2015},
    location = {Graz, Austria},
    }

  • D. Satongar, C. Pike, Y. Lam, and A. I. Tew, “The influence of headphones on the localisation of external loudspeaker sources,” Journal of the audio engineering society, vol. 63, iss. 10, p. 799–810, 2015. doi:10.17743/jaes.2015.0072
    [BibTeX] [Abstract]

    When validating systems that use headphones to synthesise virtual sound sources, a direct comparison between virtual and real sources is sometimes needed. This paper considers the passive influence of headphones on the sound transmission and perception of external loudspeaker sources, for which physical measurements and behavioral data have been obtained. Physical measurements of the effect of a number of headphone models are given and analysed using an auditory filter bank and binaural cue extraction. These highlighted that all of the headphones had an effect on localisation cues and repositioning had a measurable effect. A localisation test was undertaken using one of the best performing headphones from the measurements. It was found that the presence of the headphones caused a small increase in localisation error and that the process of judging source location was different, highlighting a possible increase in the complexity of the localisation task.

    @Article{Satongar2015,
    author = {D. Satongar and C. Pike and Y. Lam and A. I. Tew},
    title = {The influence of headphones on the localisation of external loudspeaker sources},
    journal = {Journal of the Audio Engineering Society},
    year = {2015},
    volume = {63},
    number = {10},
    pages = {799--810},
    issn = {0004-7554},
    abstract = {When validating systems that use headphones to synthesise virtual sound sources, a direct comparison between virtual and real sources is sometimes needed. This paper considers the passive influence of headphones on the sound transmission and perception of external loudspeaker sources, for which physical measurements and behavioral data have been obtained. Physical measurements of the effect of a number of headphone models are given and analysed using an auditory filter bank and binaural cue extraction. These highlighted that all of the headphones had an effect on localisation cues and repositioning had a measurable effect. A localisation test was undertaken using one of the best performing headphones from the measurements. It was found that the presence of the headphones caused a small increase in localisation error and that the process of judging source location was different, highlighting a possible increase in the complexity of the localisation task.},
    doi = {10.17743/jaes.2015.0072},
    }

  • F. Stevens and D. T. Murphy, “Acoustic source localisation in an urban environment using early reflection information,” in Euronoise, Maastricht, The Netherlands, 2015.
    [BibTeX] [Download PDF]
    @inproceedings{Stevens2015,
      author    = {Stevens, F. and Murphy, D. T.},
      title     = {Acoustic source localisation in an urban environment using early reflection information},
      booktitle = {Euronoise},
      year      = {2015},
      address   = {Maastricht, The Netherlands},
      url       = {https://www.conforg.fr/euronoise2015/proceedings/data/articles/000020.pdf},
    }

  • R. R. Vos, H. Daffern, and D. M. Howard, “A pilot study: investigating formant tuning in girl choristers through wide-band excitation,” in The voice foundation symposium, 2015.
    [BibTeX]
    @conference{Vos2015,
      author    = {Vos, R. R. and Daffern, H. and Howard, D. M.},
      title     = {A pilot study: Investigating formant tuning in girl choristers through wide-band excitation},
      booktitle = {The Voice Foundation Symposium},
      year      = {2015},
      note      = {The Voice Foundation Symposium ; Conference date: 26-05-2015 Through 31-05-2015},
      language  = {English},
    }

  • J. Yang and A. Hunt, “Real-time sonification of biceps curl exercise using muscular activity and kinematics,” in International conference on auditory display (icad), 2015.
    [BibTeX]
    @inproceedings{Yang2015,
      author       = {Yang, Jiajun and Hunt, Andy},
      title        = {Real-time sonification of biceps curl exercise using muscular activity and kinematics},
      booktitle    = {International Conference on Auditory Display (ICAD)},
      year         = {2015},
      organization = {Georgia Institute of Technology},
    }

  • J. Yang, “Enhancing the Quality and Motivation of Physical Exercise Using Real-Time Sonification,” PhD Thesis, 2015.
    [BibTeX] [Download PDF]
    @PhdThesis{Yang2015a,
    author = {Jiajun Yang},
    title = {Enhancing the Quality and Motivation of Physical Exercise Using Real-Time Sonification},
    school = {Department of Electronic Engineering, University of York},
    year = {2015},
    url = {http://etheses.whiterose.ac.uk/10396/},
    }

2014

  • T. Arvanitidis, “Spectral modelling for transformation and separation of audio signals,” Master Thesis, 2014.
    [BibTeX] [Download PDF]
    @mastersthesis{Arvanitidis2014,
      author = {Arvanitidis, Thomas},
      title  = {Spectral Modelling for Transformation and Separation of Audio Signals},
      school = {Department of Electronic Engineering, University of York},
      year   = {2014},
      url    = {http://etheses.whiterose.ac.uk/9070/},
    }

  • J. Brereton, “Singing in Space(s): Singing performance in real and virtual acoustic environments – Singers’ evaluation, performance analysis and listeners’ perception,” PhD Thesis, 2014.
    [BibTeX] [Download PDF]
    @PhdThesis{Brereton2014,
    author = {Jude Brereton},
    title = {Singing in Space(s): Singing performance in real and virtual acoustic environments - {Singers}' evaluation, performance analysis and listeners' perception},
    school = {University of York},
    year = {2014},
    url = {http://etheses.whiterose.ac.uk/7877/},
    }

  • D. Corrigan, F. Pitié, M. Gorzel, G. Kearney, V. Morris, A. Rankin, M. Linnane, M. O’Dea, C. Lee, and A. Kokaram, “A video database for the development of stereo-3D post-production algorithms,” Journal of virtual reality and broadcasting, vol. 10(2013), iss. 3, 2014.
    [BibTeX] [Abstract] [Download PDF]

    This paper introduces a database of freely available stereo-3D content designed to facilitate research in stereo post-production. It describes the structure and content of the database and provides some details about how the material was gathered. The database includes examples of many of the scenarios characteristic to broadcast footage. Material was gathered at different locations including a studio with controlled lighting and both indoor and outdoor on-location sites with more restricted lighting control. The database also includes video sequences with accompanying 3D audio data recorded in an Ambisonics format. An intended consequence of gathering the material is that the database contains examples of degradations that would be commonly present in real-world scenarios. This paper describes one such artefact caused by uneven exposure in the stereo views, causing saturation in the over-exposed view. An algorithm for the restoration of this artefact is proposed in order to highlight the usefulness of the database.

    @Article{Corrigan2014,
    author = {D. Corrigan and F. Piti{\'e} and M. Gorzel and G. Kearney and V. Morris and A. Rankin and M. Linnane and M. O'Dea and C. Lee and A. Kokaram},
    title = {A video database for the development of stereo-3{D} post-production algorithms},
    journal = {Journal of Virtual Reality and Broadcasting},
    year = {2014},
    volume = {10(2013)},
    number = {3},
    issn = {1860-2037},
    abstract = {This paper introduces a database of freely available stereo-3D content designed to facilitate research in stereo post-production. It describes the structure and content of the database and provides some details about how the material was gathered. The database includes examples of many of the scenarios characteristic to broadcast footage. Material was gathered at different locations including a studio with controlled lighting and both indoor and outdoor on-location sites with more restricted lighting control. The database also includes video sequences with accompanying 3D audio data recorded in an Ambisonics format. An intended consequence of gathering the material is that the database contains examples of degradations that would be commonly present in real-world scenarios. This paper describes one such artefact caused by uneven exposure in the stereo views, causing saturation in the over-exposed view. An algorithm for the restoration of this artefact is proposed in order to highlight the usefulness of the database.},
    keywords = {post production; stereo 3D; video-database},
    url = {http://nbn-resolving.de/urn:nbn:de:0009-6-37805},
    }

  • B. Delvaux, “The spectral impact of the hypopharyngeal cavities on the singing voice,” PhD Thesis, 2014.
    [BibTeX] [Download PDF]
    @PhdThesis{Delvaux2014,
    author = {Delvaux, Bertrand},
    title = {The Spectral Impact of the Hypopharyngeal Cavities on the Singing Voice},
    school = {Department of Electronic Engineering, University of York},
    year = {2014},
    url = {http://etheses.whiterose.ac.uk/6534/},
    }

  • M. Gorzel, G. Kearney, and F. Boland, “Investigation of ambisonic rendering of elevated sound sources,” in Audio engineering society 55th international conference: spatial audio, 2014.
    [BibTeX] [Download PDF]
    @conference{Gorzel2014,
      author    = {Gorzel, M. and Kearney, G. and Boland, F.},
      title     = {Investigation of ambisonic rendering of elevated sound sources},
      booktitle = {Audio Engineering Society 55th International Conference: Spatial Audio},
      year      = {2014},
      month     = aug,
      url       = {http://www.aes.org/e-lib/browse.cfm?elib=17385},
    }

  • C. T. Jin, P. Guillon, N. Epain, R. Zolfaghari, A. van Schaik, A. I. Tew, C. Hetherington, and J. Thorpe, “Creating the Sydney York morphological and acoustic recordings of ears database,” Ieee transactions on multimedia, vol. 16, iss. 1, p. 37–46, 2014. doi:10.1109/TMM.2013.2282134
    [BibTeX]
    @Article{Jin2014,
    author = {C. T. Jin and P. Guillon and N. Epain and R. Zolfaghari and A. {van Schaik} and A. I. Tew and C. Hetherington and J. Thorpe},
    title = {Creating the {S}ydney {Y}ork morphological and acoustic recordings of ears database},
    journal = {IEEE Transactions on Multimedia},
    year = {2014},
    volume = {16},
    number = {1},
    pages = {37--46},
    month = jan,
    issn = {1520-9210},
    doi = {10.1109/TMM.2013.2282134},
    keywords = {Head-related transfer function, 3D audio, morphological data, fast multipole boundary element method, 3D mesh models, virtual auditory space},
    publisher = {Institute of Electrical and Electronics Engineers Inc.},
    }

  • T. Neate, N. Degara, A. Hunt, and F. Nagel, “A generic evaluation model for auditory feedback in complex visual searches,” in International conference on auditory display (icad), 2014.
    [BibTeX]
    @InProceedings{Neate2014,
    author = {T. Neate and N. Degara and A. Hunt and F. Nagel},
    title = {A generic evaluation model for auditory feedback in complex visual searches},
    booktitle = {International Conference on Auditory Display (ICAD)},
    year = {2014},
    organization = {Georgia Institute of Technology},
    }

  • B. C. O’Toole, M. Gorzel, I. J. Kelly, L. O’Sullivan, G. Kearney, and F. Boland, “Virtual 5.1 surround sound localization using head-tracking devices,” in 25th iet irish signals systems conference 2014 and 2014 china-ireland international conference on information and communications technologies (issc 2014/ciict 2014), 2014, p. 41–46. doi:10.1049/cp.2014.0656
    [BibTeX]
    @InProceedings{OToole2014,
    author = {B. C. O'Toole and M. Gorzel and I. J. Kelly and L. O'Sullivan and G. Kearney and F. Boland},
    title = {Virtual 5.1 surround sound localization using head-tracking devices},
    booktitle = {25th IET Irish Signals Systems Conference 2014 and 2014 China-Ireland International Conference on Information and Communications Technologies (ISSC 2014/CIICT 2014)},
    year = {2014},
    pages = {41--46},
    month = jun,
    doi = {10.1049/cp.2014.0656},
    keywords = {audio signal processing;loudspeakers;inertial measurement unit;Oculus Rift;Microsoft Kinect face-tracking;exploratory head movements;head-tracking devices;virtual source localization accuracy;sound localization accuracy;virtual 5.1 surround sound localization;virtual 5.1 loudspeaker arrays;surround sound;binaural;head tracking;spatial audio},
    }

  • C. Pike, F. Melchior, and A. I. Tew, “Assessing the plausibility of non-individualised dynamic binaural synthesis in a small room,” in Audio engineering society 55th international conference: spatial audio, 2014.
    [BibTeX] [Abstract] [Download PDF]

    This paper presents a subjective assessment of the plausibility of a non-individualised dynamic binaural sound system, created using a dataset of binaural room impulse responses measured with a dummy head microphone. A signal detection theory analysis was carried out on the results, to assess the sensory difference between the simulation and reality. The design and objective validation of the system is also presented. A small but meaningful sensory difference was observed between real and binaurally simulated loudspeaker sounds.

    @InProceedings{Pike2014,
    author = {C. Pike and F. Melchior and A. I. Tew},
    title = {Assessing the plausibility of non-individualised dynamic binaural synthesis in a small room},
    booktitle = {Audio Engineering Society 55th International Conference: Spatial Audio},
    year = {2014},
    abstract = {This paper presents a subjective assessment of the plausibility of a non-individualised dynamic binaural sound system, created using a dataset of binaural room impulse responses measured with a dummy head microphone. A signal detection theory analysis was carried out on the results, to assess the sensory difference between the simulation and reality. The design and objective validation of the system is also presented. A small but meaningful sensory difference was observed between real and binaurally simulated loudspeaker sounds.},
    day = {26},
    isbn = {9781634397599},
    url = {http://www.aes.org/e-lib/browse.cfm?elib=17353},
    }

  • J. Rees-Jones, J. Brereton, and D. Murphy, “Spatial audio quality and user preference of listening systems in video games,” in Dafx 2014, 2014.
    [BibTeX] [Download PDF]
    @InProceedings{Rees-Jones2014,
    author = {Rees-Jones, Joe and Brereton, Jude and Murphy, Damian},
    title = {Spatial Audio Quality and User Preference of Listening Systems in Video Games},
    booktitle = {DAFx 2014},
    year = {2014},
    url = {https://www.ntnu.edu/documents/1001201110/1266017954/DAFx-15_submission_35.pdf},
    }

  • A. Rugchatjaroen, “Articulatory-Based English Consonant Synthesis in 2-D Digital Waveguide Mesh,” PhD Thesis, 2014.
    [BibTeX] [Download PDF]
    @PhdThesis{Rugchatjaroen2014,
    author = {Rugchatjaroen, Anocha},
    title = {{Articulatory-Based English Consonant Synthesis in 2-D Digital Waveguide Mesh}},
    school = {University of York},
    year = {2014},
    url = {http://etheses.whiterose.ac.uk/7109/},
    }

  • F. Stevens and D. T. Murphy, “Spatial impulse response measurement in an urban environment,” in Audio engineering society 55th international conference: spatial audio, Helsinki, Finland, 2014.
    [BibTeX] [Download PDF]
    @InProceedings{Stevens2014,
    author = {Stevens, F. and Murphy, D. T.},
    title = {Spatial impulse response measurement in an urban environment},
    booktitle = {Audio Engineering Society 55th International Conference: Spatial Audio},
    year = {2014},
    address = {Helsinki, Finland},
    month = aug,
    url = {http://www.aes.org/e-lib/browse.cfm?elib=17355},
    }

  • D. Zantalis and J. J. Wells, “Semi- Blind Audio Source Separation of Linearly Mixed Two-Channel Recordings via Guided Matching Pursuit,” in Dafx 2014, 2014.
    [BibTeX] [Download PDF]
    @InProceedings{Zantalis2014,
    author = {Dimitri Zantalis and Jeremy J. Wells},
    title = {{Semi- Blind Audio Source Separation of Linearly Mixed Two-Channel Recordings via Guided Matching Pursuit}},
    booktitle = {DAFx 2014},
    year = {2014},
    url = {http://www.dafx14.fau.de/papers/dafx14_dimitri_zantalis_semi_blind_audio_source_s.pdf},
    }

  • R. Zolfaghari, N. Epain, C. Jin, A. I. Tew, and J. Glaunès, “A multiscale LDDMM template algorithm for studying ear shape variations,” in Ieee international conference on signal processing and communication systems (icspcs), 2014. doi:10.1109/ICSPCS.2014.7021100
    [BibTeX] [Abstract]

    This paper describes a method to establish an average human ear shape across a population of ears by se-quentially applying the Large Deformation Diffeomorphic Metric Mapping (LDDMM) framework at successively smaller physical scales. Determining such a population average ear shape, also referred to here as a template ear, is an essential step in studying the statistics of ear shapes because it allows the variations in ears to be studied relative to a common template shape. Our interest in the statistics of ear shapes stems from our desire to understand the relationship between ear morphology and the head-related impulse response (HRIR) filters that are essential for rendering 3D audio over headphones. The shape of the ear varies among listeners and is as individualized as a fingerprint. Because the acoustic filtering properties of the ears depend on their shape, the HRIR filters required for rendering 3D audio are also individualized. The contribution of this work is the demonstration of a sequential multiscale approach to creating a population template ear shape using the LDDMM framework. In particular we apply our sequential multiscale algorithm to a small population of synthetic ears in order to analyse its performance given a known reference ear shape.

    @Conference{Zolfaghari2014,
    author = {R. Zolfaghari and N. Epain and C. Jin and A. I. Tew and J. Glaun{\`e}s},
    title = {A multiscale {LDDMM} template algorithm for studying ear shape variations},
    booktitle = {IEEE International Conference on Signal Processing and Communication Systems (ICSPCS)},
    year = {2014},
    month = dec,
    note = {INSPEC accession number 14881796; IEEE 8th International Conference on Signal Processing and Communication Systems (ICSPCS), 2014, QLD ; Conference date: 15-12-2014 Through 17-12-2014},
    abstract = {This paper describes a method to establish an average human ear shape across a population of ears by se-quentially applying the Large Deformation Diffeomorphic Metric Mapping (LDDMM) framework at successively smaller physical scales. Determining such a population average ear shape, also referred to here as a template ear, is an essential step in studying the statistics of ear shapes because it allows the variations in ears to be studied relative to a common template shape. Our interest in the statistics of ear shapes stems from our desire to understand the relationship between ear morphology and the head-related impulse response (HRIR) filters that are essential for rendering 3D audio over headphones. The shape of the ear varies among listeners and is as individualized as a fingerprint. Because the acoustic filtering properties of the ears depend on their shape, the HRIR filters required for rendering 3D audio are also individualized. The contribution of this work is the demonstration of a sequential multiscale approach to creating a population template ear shape using the LDDMM framework. In particular we apply our sequential multiscale algorithm to a small population of synthetic ears in order to analyse its performance given a known reference ear shape.},
    doi = {10.1109/ICSPCS.2014.7021100},
    isbn = {978-1-4799-5255-7},
    keywords = {ear shape, morphology, head-related transfer function, SYMARE},
    }

  • J. T. F. Harrison, “The Development of a Parametric Real-Time Voice Source Model for use with Vocal Tract Modelling Synthesis on Portable Devices,” Master Thesis, 2014.
    [BibTeX] [Download PDF]
    @MastersThesis{Harrison2014,
    author = {Harrison, Jacob T. F.},
    title = {{The Development of a Parametric Real-Time Voice Source Model for use with Vocal Tract Modelling Synthesis on Portable Devices}},
    school = {University of York},
    year = {2014},
    url = {http://etheses.whiterose.ac.uk/8334/},
    }

2013

  • H. Daffern and J. Brereton, “Testing a new protocol to measure tuning response behaviour in solo voice ensemble singing,” in Sound and music computing, 2013.
    [BibTeX]
    @InProceedings{Daffern2013,
    author = {Daffern, Helena and Brereton, Jude},
    title = {Testing a new protocol to measure tuning response behaviour in solo voice ensemble singing},
    booktitle = {Sound and Music Computing},
    year = {2013},
    conference = {Sound and Music Computing Conference 2013},
    address = {Stockholm, Sweden},
    }

  • H. Daffern and D. M. Howard, “Analysing changing vibrato behaviour in solo voice ensemble singing,” in Pan european voice conference, 2013.
    [BibTeX] [Abstract]

    Perceptually vibrato is known to be an important characteristic in choral singing, with conductors specifically directing singers in their use of vibrato when considering ‘choral blend’, tuning, expression and taste. Particularly in Renaissance and Baroque repertoire vibrato is conventionally used as an ornament, and current convention in vocal ensemble performance is to restrict the use of vibrato for musical affect at specific moments in the music, such as at the resolution of a suspension. Whilst vibrato in solo singing is well-researched, consideration of vibrato behaviour of individuals in an ensemble setting is still in its infancy. This is in part due to the difficulty of isolating the individual voices for analysis. In order to construct a method for analysing vocal characteristics of individual singers within a group, a protocol was designed and pilot tested. Laryngograph and audio recordings were made of student vocal quartets from the University of York. The laryngograph data for each singer was analysed to extract fundamental frequency data. The fundamental frequency data was then used to analyse vibrato characteristics of the individual singers, considering in particular adapting vibrato for ‘choral blend’ and the use of vibrato as a musical ornament in suspensions. The perceptual relevance of the vibrato behaviour was assessed through a listening test. The results of vibrato characteristics of individual singers are considered alongside their perceptual relevance. The accuracy of the data is also considered alongside the implications of applying this protocol to analyse other vocal characteristics in choral singers.

    @inproceedings{Daffern2013b,
    title = "Analysing changing vibrato behaviour in solo voice ensemble singing",
    abstract = "Perceptually vibrato is known to be an important characteristic in choral singing, with conductors specifically directing singers in their use of vibrato when considering ‘choral blend’, tuning, expression and taste. Particularly in Renaissance and Baroque repertoire vibrato is conventionally used as an ornament, and current convention in vocal ensemble performance is to restrict the use of vibrato for musical affect at specific moments in the music, such as at the resolution of a suspension. Whilst vibrato in solo singing is well-researched, consideration of vibrato behaviour of individuals in an ensemble setting is still in its infancy. This is in part due to the difficulty of isolating the individual voices for analysis. In order to construct a method for analysing vocal characteristics of individual singers within a group, a protocol was designed and pilot tested. Laryngograph and audio recordings were made of student vocal quartets from the University of York. The laryngograph data for each singer was analysed to extract fundamental frequency data. The fundamental frequency data was then used to analyse vibrato characteristics of the individual singers, considering in particular adapting vibrato for ‘choral blend’ and the use of vibrato as a musical ornament in suspensions. The perceptual relevance of the vibrato behaviour was assessed through a listening test. The results of vibrato characteristics of individual singers are considered alongside their perceptual relevance. The accuracy of the data is also considered alongside the implications of applying this protocol to analyse other vocal characteristics in choral singers.",
    author = "H. Daffern and D. M. Howard",
    year = "2013",
    language = "English",
    booktitle = "Pan European Voice Conference",
    }

  • A. Foteinou, “Perception of Objective Parameter Variations in Virtual Acoustic Spaces,” PhD Thesis, 2013.
    [BibTeX] [Download PDF]
    @PhdThesis{Foteinou2013,
    author = {Foteinou, Aglaia},
    title = {{Perception of Objective Parameter Variations in Virtual Acoustic Spaces}},
    school = {University of York},
    year = {2013},
    url = {http://etheses.whiterose.ac.uk/5628/},
    }

  • S. Harriet, “Application of Auralisation and Soundscape Methodologies to Environmental Noise,” PhD Thesis, 2013.
    [BibTeX] [Download PDF]
    @PhdThesis{Harriet2013,
    author = {Sorriel Harriet},
    title = {{Application of Auralisation and Soundscape Methodologies to Environmental Noise}},
    school = {University of York},
    year = {2013},
    url = {http://etheses.whiterose.ac.uk/4724/},
    }

  • D. M. Howard, H. Daffern, and J. Brereton, “Four-part choral synthesis system for investigating intonation in a cappella choral singing,” Logopedics phoniatrics vocology, vol. 38, iss. 3, p. 135–142, 2013.
    [BibTeX]
    @Article{Howard2013-if,
    author = {Howard, David M. and Daffern, Helena and Brereton, Jude},
    title = {Four-part choral synthesis system for investigating intonation in a cappella choral singing},
    journal = {Logopedics Phoniatrics Vocology},
    year = {2013},
    volume = {38},
    number = {3},
    pages = {135--142},
    month = oct,
    language = {en},
    }

  • G. Kearney, “Sound field rendering for distributed audiences,” in Audio engineering society 52nd international conference: sound field control – engineering and perception, 2013.
    [BibTeX] [Download PDF]
    @Conference{Kearney2013,
    author = {G. Kearney},
    title = {Sound field rendering for distributed audiences},
    booktitle = {Audio Engineering Society 52nd International Conference: Sound Field Control - Engineering and Perception},
    year = {2013},
    month = sep,
    url = {http://www.aes.org/e-lib/browse.cfm?elib=16902},
    }

  • C. Kirchhuebel, “The acoustic and temporal characteristics of deceptive speech,” PhD Thesis, 2013.
    [BibTeX] [Download PDF]
    @PhdThesis{Kirchhuebel2013,
    author = {Kirchhuebel, Christin},
    title = {{The acoustic and temporal characteristics of deceptive speech}},
    school = {University of York},
    year = {2013},
    url = {http://etheses.whiterose.ac.uk/4790/},
    }

  • M. Lopez, S. Pauletto, and G. Kearney, “The application of impulse response measurement techniques to the study of the acoustics of Stonegate, a performance space used in medieval English drama,” Acta acustica united with acustica, vol. 99, iss. 1, p. 98–109, 2013.
    [BibTeX] [Download PDF]
    @Article{Lopez2013,
    author = {Lopez, Mariana and Pauletto, Sandra and Kearney, Gavin},
    title = {The application of impulse response measurement techniques to the study of the acoustics of {S}tonegate, a performance space used in medieval {E}nglish drama},
    journal = {Acta Acustica united with Acustica},
    year = {2013},
    volume = {99},
    number = {1},
    pages = {98--109},
    publisher = {S. Hirzel Verlag},
    url = {https://www.ingentaconnect.com/content/dav/aaua/2013/00000099/00000001/art00014},
    }

  • P. Lunn and A. Hunt, “Phantom signals: Erroneous perception observed during the audification of radio astronomy data,” in International conference on auditory display (icad), 2013.
    [BibTeX]
    @InProceedings{Lunn2013,
    author = {P. Lunn and A. Hunt},
    title = {Phantom signals: {E}rroneous perception observed during the audification of radio astronomy data},
    booktitle = {International Conference on Auditory Display (ICAD)},
    year = {2013},
    organization = {Georgia Institute of Technology},
    }

  • D. Satongar, C. Pike, Y. Lam, and A. I. Tew, “On the influence of headphones on localisation of loudspeaker sources,” in Audio Engineering Society 135th Convention, 2013, p. 1–18.
    [BibTeX] [Abstract] [Download PDF]

    When validating systems that use headphones to synthesise virtual sound sources, a direct comparison between virtual and real sources is sometimes needed. This paper presents objective and subjective measurements of the influence of headphones on external loudspeaker sources. Objective measurements of the effect of a number of headphone models are given and analysed using an auditory filter bank and binaural cue extraction. Objective results highlight that all of the headphones had an effect on localisation cues. A subjective localisation test was undertaken using one of the best performing headphones from the measurements. It was found that the presence of the headphones caused a small increase in localisation error but also that the process of judging source location was different, highlighting a possible increase in the complexity of the localisation task.

    @Conference{Satongar2013,
    author = {D. Satongar and C. Pike and Y. Lam and A. I. Tew},
    title = {On the influence of headphones on localisation of loudspeaker sources},
    booktitle = {{Audio Engineering Society 135th Convention}},
    year = {2013},
    pages = {1--18},
    month = oct,
    note = {Also published as BBC Research \& Development White Paper WHP 276; 135th AES Convention ; Conference date: 17-10-2013 Through 20-10-2013},
    abstract = {When validating systems that use headphones to synthesise virtual sound sources, a direct comparison between virtual and real sources is sometimes needed. This paper presents objective and subjective measurements of the influence of headphones on external loudspeaker sources. Objective measurements of the effect of a number of headphone models are given and analysed using an auditory filter bank and binaural cue extraction. Objective results highlight that all of the headphones had an effect on localisation cues. A subjective localisation test was undertaken using one of the best performing headphones from the measurements. It was found that the presence of the headphones caused a small increase in localisation error but also that the process of judging source location was different, highlighting a possible increase in the complexity of the localisation task.},
    day = {17},
    url = {http://www.aes.org/e-lib/browse.cfm?elib=17003},
    }

  • J. Yang and A. Hunt, “Sonic trainer: Real-time sonification of muscular activity and limb positions in general physical exercise,” in Ison 2013, 4th interactive sonification workshop, 2013.
    [BibTeX]
    @InProceedings{Yang2013,
    author = {J. Yang and A. Hunt},
    title = {Sonic trainer: {R}eal-time sonification of muscular activity and limb positions in general physical exercise},
    booktitle = {ISon 2013, 4th Interactive Sonification Workshop},
    year = {2013},
    }

  • T. Neate, “Interactive Spatial Auditory Display of Graphical Data,” Master Thesis, 2013.
    [BibTeX] [Download PDF]
    @MastersThesis{Neate2013,
    author = {Neate, Timothy},
    title = {{Interactive Spatial Auditory Display of Graphical Data}},
    school = {University of York},
    year = {2013},
    url = {http://etheses.whiterose.ac.uk/5524/},
    }

  • N. F. Arner, “Investigation of the use of Multi-Touch Gestures in Music Interaction,” Master Thesis, 2013.
    [BibTeX] [Download PDF]
    @MastersThesis{Arner2013,
    author = {Arner, Nicholas Franklin},
    title = {{Investigation of the use of Multi-Touch Gestures in Music Interaction}},
    school = {University of York},
    year = {2013},
    url = {http://etheses.whiterose.ac.uk/5312/},
    }

2012

  • J. Brereton, D. Murphy, and D. Howard, “A loudspeaker-based room acoustics simulation for real-time musical performance,” in Audio engineering society UK 25th conference: spatial audio in today’s 3D world, 2012.
    [BibTeX] [Download PDF]
    @InProceedings{Brereton2012-hl,
    author = {Brereton, Jude and Murphy, Damian and Howard, David},
    title = {A loudspeaker-based room acoustics simulation for real-time musical performance},
    booktitle = {Audio Engineering Society {UK} 25th Conference: Spatial Audio in Today's {3D} World},
    year = {2012},
    publisher = {aes.org},
    institution = {Audio Engineering Society},
    url = {http://www.aes.org/e-lib/browse.cfm?elib=18116},
    }

  • R. Bresin, T. Hermann, and A. Hunt, Interactive sonification, Springer, 2012.
    [BibTeX]
    @Misc{Bresin2012,
    author = {Bresin, Roberto and Hermann, Thomas and Hunt, Andy},
    title = {Interactive sonification},
    year = {2012},
    publisher = {Springer},
    }

  • H. Daffern and D. M. Howard, “Spectral characteristics of the baroque trumpet: a case study,” in Acoustics, 2012, p. 3969–3974.
    [BibTeX] [Abstract]

    The baroque trumpet like many reconstructed or original historical instruments is very difficult to play compared to its modern day equivalent. It is commonly accepted that historically informed performances need a specialist baroque trumpeter performing on a specialist instrument in order to achieve the desired sound. Particularly, there are certain timbral characteristics and expectations concerning the range of dynamics employed that are highlighted by today’s early music conductors and players as being unique to and expected of the baroque trumpet. An opportunity arose to record a world-renowned baroque trumpeter playing original trumpets from 1780, 1788, 1912 and 1967 in the fully (6-sided) acoustic anechoic chamber at the University of York. Due to his strong instinct as a player that the mouthpiece is the most significant timbral characteristic of the instrument, performances on the later two trumpets were recorded using two different mouthpieces. The spectral characteristics of each of the instruments and the impact of changing the mouthpiece are analysed in terms of the spectral correlates of audible differences integral to the sound quality of each.

    @Conference{Daffern2012,
    author = {H. Daffern and D. M. Howard},
    title = {Spectral characteristics of the baroque trumpet: a case study},
    booktitle = {Acoustics},
    year = {2012},
    pages = {3969--3974},
    month = {4},
    note = {Proceedings of the Acoustics 2012 ; Conference date: 23-04-2012 Through 27-04-2012},
    abstract = {The baroque trumpet like many reconstructed or original historical instruments is very difficult to play compared to its modern day equivalent. It is commonly accepted that historically informed performances need a specialist baroque trumpeter performing on a specialist instrument in order to achieve the desired sound. Particularly, there are certain timbral characteristics and expectations concerning the range of dynamics employed that are highlighted by today’s early music conductors and players as being unique to and expected of the baroque trumpet. An opportunity arose to record a world-renowned baroque trumpeter playing original trumpets from 1780, 1788, 1912 and 1967 in the fully (6-sided) acoustic anechoic chamber at the University of York. Due to his strong instinct as a player that the mouthpiece is the most significant timbral characteristic of the instrument, performances on the later two trumpets were recorded using two different mouthpieces. The spectral characteristics of each of the instruments and the impact of changing the mouthpiece are analysed in terms of the spectral correlates of audible differences integral to the sound quality of each.},
    day = {27},
    language = {English},
    }

  • H. Daffern, J. S. Brereton, and D. M. Howard, “The impact of vibrato usage on the perception of pitch in early music compared to opera,” in Acoustics, 2012, p. 3949–3954.
    [BibTeX] [Abstract]

    Previous studies on the pitch of long-duration vibrato tones have typically used synthesised modulator tones to assess the perceived pitch in relation to its arithmetic or geometric mean fundamental frequency. In this study a listening test was conducted with expert listener subjects matching recorded vibrato tones sung by professional singers using a method of adjustment and free response paradigm as employed by van Besouw et al. Example tones selected from recordings by 16 singers were used in the test, 8 of whom were employed at the Royal Opera House, Covent Garden, and the remaining singers specialised in Early Music Performance. A previous study by Daffern (2008) in the vibrato usage by these singers shows a noticeable use difference in the use of vibrato between these performance groups, particularly in extent throughout long tones. The impact of these differences in vibrato will be assessed in terms of the perception of fundamental frequency in long tones as performed by the two groups of singer, and the effectiveness of using real recordings to assess listener perception of vibrato tones will be discussed.

    @Conference{Daffern2012a,
    author = {H. Daffern and J. S. Brereton and D. M. Howard},
    title = {The impact of vibrato usage on the perception of pitch in early music compared to opera},
    booktitle = {Acoustics},
    year = {2012},
    pages = {3949--3954},
    month = {4},
    note = {Proceedings of the Acoustics 2012 ; Conference date: 23-04-2012 Through 27-04-2012},
    abstract = {Previous studies on the pitch of long-duration vibrato tones have typically used synthesised modulator tones to assess the perceived pitch in relation to its arithmetic or geometric mean fundamental frequency. In this study a listening test was conducted with expert listener subjects matching recorded vibrato tones sung by professional singers using a method of adjustment and free response paradigm as employed by van Besouw et al. Example tones selected from recordings by 16 singers were used in the test, 8 of whom were employed at the Royal Opera House, Covent Garden, and the remaining singers specialised in Early Music Performance. A previous study by Daffern (2008) in the vibrato usage by these singers shows a noticeable use difference in the use of vibrato between these performance groups, particularly in extent throughout long tones. The impact of these differences in vibrato will be assessed in terms of the perception of fundamental frequency in long tones as performed by the two groups of singer, and the effectiveness of using real recordings to assess listener perception of vibrato tones will be discussed.},
    day = {27},
    keywords = {vibrato, acoustics, Perception, singing analysis, singing vocal performance},
    language = {English},
    }

  • M. Gorzel, D. Corrigan, J. Squires, F. Boland, and G. Kearney, “Distance perception in real and virtual environments,” in Audio engineering society uk 25th conference: spatial audio in today’s 3d world, 2012.
    [BibTeX] [Download PDF]
    @Conference{Gorzel2012,
    author = {Gorzel, Marcin and Corrigan, David and Squires, John and Boland, Frank and Kearney, Gavin},
    title = {Distance perception in real and virtual environments},
    booktitle = {Audio Engineering Society UK 25th Conference: Spatial Audio in Today's 3D World},
    year = {2012},
    month = mar,
    url = {http://www.aes.org/e-lib/browse.cfm?elib=18119},
    }

  • D. M. Howard, H. Daffern, and J. Brereton, “Quantitative voice quality analyses of a soprano singing early music in three different performance styles,” Biomedical signal processing and control, vol. 7, iss. 1, p. 58–64, 2012.
    [BibTeX]
    @Article{Howard2012-mf,
    author = {Howard, David M. and Daffern, Helena and Brereton, Jude},
    title = {Quantitative voice quality analyses of a soprano singing early music in three different performance styles},
    journal = {Biomedical Signal Processing and Control},
    year = {2012},
    volume = {7},
    number = {1},
    pages = {58--64},
    month = jan,
    keywords = {Soprano voice quality; Electrolaryngograph; Spectrograph; Hearing modelling spectrography; Vibrato; Larynx closed quotient; Early music},
    publisher = {Elsevier},
    }

  • G. Kearney, M. Gorzel, H. Rice, and F. Boland, “Distance perception in interactive virtual acoustic environments using first and higher order ambisonic sound fields,” Acta acustica united with acustica, vol. 98, iss. 1, p. 61–71, 2012.
    [BibTeX] [Download PDF]
    @Article{Kearney2012,
    author = {Kearney, Gavin and Gorzel, Marcin and Rice, Henry and Boland, Frank},
    title = {Distance perception in interactive virtual acoustic environments using first and higher order ambisonic sound fields},
    journal = {Acta Acustica united with Acustica},
    year = {2012},
    volume = {98},
    number = {1},
    pages = {61--71},
    publisher = {S. Hirzel Verlag},
    url = {https://www.researchgate.net/publication/263380387_Distance_Perception_in_Interactive_Virtual_Acoustic_Environments_using_First_and_Higher_Order_Ambisonic_Sound_Fields},
    }

  • C. Masterson, G. Kearney, M. Gorzel, and F. M. Boland, “HRIR order reduction using approximate factorization,” IEEE Transactions on Audio, Speech, and Language Processing, vol. 20, iss. 6, p. 1808–1817, 2012. doi:10.1109/TASL.2012.2189565
    [BibTeX] [Download PDF]
    @Article{Masterson2012,
    author = {C. Masterson and G. Kearney and M. Gorzel and F. M. Boland},
    title = {{HRIR} order reduction using approximate factorization},
    journal = {IEEE Transactions on Audio, Speech, and Language Processing},
    year = {2012},
    volume = {20},
    number = {6},
    pages = {1808--1817},
    month = aug,
    issn = {1558-7916},
    doi = {10.1109/TASL.2012.2189565},
    keywords = {acoustic signal processing;delays;transient response;HRIR order reduction;approximate factorization;factorization technique;head related impulse responses;close approximation;factorization algorithm;direction independent components;direction dependent components;low reconstruction error;multiple similar local minima;psychoacoustic significance;regularization technique;robust algorithm;minimum phase HRIR data;very short direction-dependent components;initial delay inclusive HRIR data;initial time delay;KEMAR databases;CIPIC databases;subjective listening tests;unfactorized HRIRs;truncated HRIRs;Ambisonic based virtual loudspeaker array;Ear;Time domain analysis;Loudspeakers;Equations;Frequency domain analysis;Delay;Databases;Acoustic signal processing;head related impulse response (HRIR);factorization},
    url = {https://ieeexplore.ieee.org/document/6161609},
    }

  • J. E. McHugh, G. Kearney, H. Rice, and F. N. Newell, “The sound of the crowd: Auditory information modulates the perceived emotion of a crowd based on bodily expressions.,” Emotion, vol. 12, iss. 1, p. 120, 2012.
    [BibTeX] [Download PDF]
    @Article{McHugh2012,
    author = {J. E. McHugh and G. Kearney and H. Rice and F. N. Newell},
    title = {The sound of the crowd: {A}uditory information modulates the perceived emotion of a crowd based on bodily expressions.},
    journal = {Emotion},
    year = {2012},
    volume = {12},
    number = {1},
    pages = {120},
    publisher = {American Psychological Association},
    url = {https://www.ncbi.nlm.nih.gov/pubmed/21875188},
    }

  • N. Paterson, G. Kearney, K. Naliuka, T. Carrigy, M. Haahr, and F. Conway, “Viking ghost hunt: creating engaging sound design for location-aware applications,” International journal of arts and technology, vol. 6, iss. 1, p. 61–82, 2012. doi:10.1504/IJART.2013.050692
    [BibTeX]
    @Article{Paterson2012,
    author = {Paterson, Natasa and Kearney, Gavin and Naliuka, Katsiaryna and Carrigy, Tara and Haahr, Mads and Conway, Fionnuala},
    title = {Viking ghost hunt: creating engaging sound design for location-aware applications},
    journal = {International Journal of Arts and Technology},
    year = {2012},
    volume = {6},
    number = {1},
    pages = {61--82},
    doi = {10.1504/IJART.2013.050692},
    publisher = {Inderscience Publishers},
    }

  • M. Speed, “Voice Synthesis using the Three-Dimensional Digital Waveguide Mesh,” PhD Thesis, 2012.
    [BibTeX] [Download PDF]
    @PhdThesis{Speed2012,
    author = {Matthew Speed},
    title = {{Voice Synthesis using the Three-Dimensional Digital Waveguide Mesh}},
    school = {University of York},
    year = {2012},
    url = {http://etheses.whiterose.ac.uk/2800/},
    }

  • A. I. Tew, C. T. Hetherington, and J. Thorpe, “Morphoacoustic perturbation analysis: Principles and validation,” in Joint meeting of the 11th congres francais d’acoustique and the 2012 annual meeting of the institute of acoustics, 2012, p. 867–872.
    [BibTeX] [Abstract] [Download PDF]

    We present a frequency domain technique for investigating the relationship between acoustic properties of the human hearing system and the morphology responsible for creating them. Exploiting reciprocity, the boundary element method is applied to determine head-related transfer functions (HRTFs) for various directions and distances from a surface mesh model of a head and pinnae. Small orthogonal surface harmonic deformations are applied to the mesh one at a time and stored in a database together with the resulting, approximately linear, changes to the HRTFs (delta HRTFs). Once the computationally intensive process of constructing the database has been completed, identifying the morphological origins of arbitrary acoustic spectral features is very rapid. The method, which we term morphoacoustic perturbation analysis in the frequency domain (MPA-FD), is outlined and a proof-of-principle implementation described. MPA-FD is demonstrated by using it to determine the regions of the pinna responsible for determining the centre frequency of an HRTF notch and a peak. The predictions show good agreement with direct acoustic measurements.

    @Conference{Tew2012,
    author = {A. I. Tew and C. T. Hetherington and J. Thorpe},
    title = {Morphoacoustic perturbation analysis: {P}rinciples and validation},
    booktitle = {Joint Meeting of the 11th Congres Francais d'Acoustique and the 2012 Annual Meeting of the Institute of Acoustics},
    year = {2012},
    pages = {867--872},
    note = {Proceedings of the Acoustics 2012 ; Conference date: 23-04-2012 Through 27-04-2012},
    abstract = {We present a frequency domain technique for investigating the relationship between acoustic properties of the human hearing system and the morphology responsible for creating them. Exploiting reciprocity, the boundary element method is applied to determine head-related transfer functions (HRTFs) for various directions and distances from a surface mesh model of a head and pinnae. Small orthogonal surface harmonic deformations are applied to the mesh one at a time and stored in a database together with the resulting, approximately linear, changes to the HRTFs (delta HRTFs). Once the computationally intensive process of constructing the database has been completed, identifying the morphological origins of arbitrary acoustic spectral features is very rapid. The method, which we term morphoacoustic perturbation analysis in the frequency domain (MPA-FD), is outlined and a proof-of-principle implementation described. MPA-FD is demonstrated by using it to determine the regions of the pinna responsible for determining the centre frequency of an HRTF notch and a peak. The predictions show good agreement with direct acoustic measurements.},
    day = {24},
    keywords = {acoustics, binaural, morphoacoustic, pinna, HRTF},
    url = {https://hal.archives-ouvertes.fr/hal-00811131},
    }

2011

  • J. Brereton, D. Murphy, and D. M. Howard, “Evaluating the Auralization of Performance Spaces and its Effect on Singing Performance,” in Audio Engineering Society 130th Convention, 2011.
    [BibTeX] [Download PDF]
    @InProceedings{Brereton2011-vz,
    author = {Brereton, Judith and Murphy, Damian and Howard, David M.},
    title = {{Evaluating the Auralization of Performance Spaces and its Effect on Singing Performance}},
    booktitle = {{Audio Engineering Society 130th Convention}},
    year = {2011},
    institution = {Audio Engineering Society},
    url = {http://www.aes.org/e-lib/browse.cfm?elib=15847},
    }

  • H. Daffern and D. M. Howard, “Acoustic analysis and the identification of performance stress in singing.” 2011.
    [BibTeX]
    @conference{Daffern2011,
    title = "Acoustic analysis and the identification of performance stress in singing",
    author = "H. Daffern and D. M. Howard",
    year = "2011",
    month = jun,
    day = "3",
    language = "English",
    }

  • T. Hermann, A. Hunt, and J. G. Neuhoff, The Sonification Handbook, Logos Verlag Berlin, Germany, 2011.
    [BibTeX]
    @Book{Hermann2011,
    title = {The {S}onification {H}andbook},
    publisher = {Logos Verlag Berlin, Germany},
    year = {2011},
    author = {T. Hermann and A. Hunt and J. G. Neuhoff},
    }

  • M. Lopez, S. Pauletto, and G. Kearney, “Virtual acoustics and performance spaces in medieval English drama,” in Conference on the acoustics of ancient theatres, 2011.
    [BibTeX]
    @InProceedings{Lopez2011,
    author = {M. Lopez and S. Pauletto and G. Kearney},
    title = {Virtual acoustics and performance spaces in medieval {E}nglish drama},
    booktitle = {Conference on The Acoustics of Ancient Theatres},
    year = {2011},
    }

  • J. Schofield, “Real-time acoustic identification of invasive wood-boring beetles,” PhD Thesis, 2011.
    [BibTeX] [Download PDF]
    @PhdThesis{Schofield2011,
    author = {James Schofield},
    title = {{Real-time acoustic identification of invasive wood-boring beetles}},
    school = {University of York},
    year = {2011},
    url = {http://etheses.whiterose.ac.uk/1978/},
    }

  • J. Stammers, “Audio Event Classification for Urban Soundscape Analysis,” PhD Thesis, 2011.
    [BibTeX] [Download PDF]
    @PhdThesis{Stammers2011,
    author = {Jon Stammers},
    title = {{Audio Event Classification for Urban Soundscape Analysis}},
    school = {University of York},
    year = {2011},
    url = {http://etheses.whiterose.ac.uk/19142/},
    }

  • J. Cobb, “An accelerometer based gestural capture system for performer based music composition,” Master Thesis, 2011.
    [BibTeX] [Download PDF]
    @MastersThesis{Cobb2011,
    author = {Jon Cobb},
    title = {{An accelerometer based gestural capture system for performer based music composition}},
    school = {University of York},
    year = {2011},
    url = {http://etheses.whiterose.ac.uk/2252/},
    }

2010

  • O. Bunting, “Sparse separation of sources in 3D soundscapes,” PhD Thesis, 2010.
    [BibTeX] [Download PDF]
    @PhdThesis{Bunting2010,
    author = {Oliver Bunting},
    title = {{Sparse separation of sources in 3D soundscapes}},
    school = {University of York},
    year = {2010},
    url = {http://etheses.whiterose.ac.uk/1505/},
    }

  • D. Corrigan, P. Francois, V. Morris, A. Rankin, M. Linnane, G. Kearney, M. Gorzel, M. O’Dea, C. Lee, and A. Kokaram, “A video database for the development of stereo-3D post-production algorithms,” in Conference on visual media production (cvmp), 2010, p. 64–73.
    [BibTeX]
    @InProceedings{Corrigan2010,
    author = {D. Corrigan and P. Francois and V. Morris and A. Rankin and M. Linnane and G. Kearney and M. Gorzel and M. O'Dea and C. Lee and A. Kokaram},
    title = {A video database for the development of stereo-3{D} post-production algorithms},
    booktitle = {Conference on Visual Media Production (CVMP)},
    year = {2010},
    pages = {64--73},
    organization = {IEEE},
    }

  • H. Daffern and D. M. Howard, “Voice source comparison between modern singers of early music and opera,” Logopedics phoniatrics vocology, vol. 35, iss. 2, p. 68–73, 2010. doi:10.3109/14015439.2010.482861
    [BibTeX] [Abstract]

    An experiment was conducted comparing two subject groups, each comprised of eight professional singers specializing in a genre of classical music: early music or grand opera. Electroglottography was used to consider vocal characteristics idiomatic to each genre. Whilst there are clear differences in contact quotient between subjects, particularly when relationships between fundamental frequency (f0) and contact quotient (Qx) are considered, there is no apparent link between contact quotient behaviour and performance specialism based on the results of this sample.

    @article{Daffern2010,
    title = "Voice source comparison between modern singers of early music and opera",
    abstract = "An experiment was conducted comparing two subject groups, each comprised of eight professional singers specializing in a genre of classical music: early music or grand opera. Electroglottography was used to consider vocal characteristics idiomatic to each genre. Whilst there are clear differences in contact quotient between subjects, particularly when relationships between fundamental frequency (f0) and contact quotient (Qx) are considered, there is no apparent link between contact quotient behaviour and performance specialism based on the results of this sample.",
    keywords = "Contact quotient, early music, electrolaryngograph, opera, singing",
    author = "H. Daffern and D. M. Howard",
    year = "2010",
    month = jul,
    doi = "10.3109/14015439.2010.482861",
    language = "English",
    volume = "35",
    pages = "68--73",
    journal = "Logopedics Phoniatrics Vocology",
    issn = "1401-5439",
    publisher = "Informa Healthcare",
    number = "2",
    }

  • A. Edwards, A. Hunt, G. Hines, V. Jackson, A. Podvoiskis, R. Roseblade, and J. Stammers, “Sonification strategies for examination of biological cells,” in International conference on auditory display (icad), 2010.
    [BibTeX]
    @InProceedings{Edwards2010,
    author = {Edwards, Alistair and Hunt, Andy and Hines, Genevi{\`e}ve and Jackson, Vanessa and Podvoiskis, Alyte and Roseblade, Richard and Stammers, Jon},
    title = {Sonification strategies for examination of biological cells},
    booktitle = {International Conference on Auditory Display (ICAD)},
    year = {2010},
    organization = {Georgia Institute of Technology},
    }

  • N. Evans, “Automated Vehicle Detection and Classification using Acoustic and Seismic Signals,” PhD Thesis, 2010.
    [BibTeX] [Download PDF]
    @PhdThesis{Evans2010,
    author = {Naoko Evans},
    title = {{Automated Vehicle Detection and Classification using Acoustic and Seismic Signals}},
    school = {University of York},
    year = {2010},
    url = {http://etheses.whiterose.ac.uk/1151/},
    }

  • A. H. Moore, A. I. Tew, and R. Nicol, “An initial validation of individualized crosstalk cancellation filters for binaural perceptual experiments,” Journal of the audio engineering society, vol. 58, iss. 1/2, p. 36–45, 2010.
    [BibTeX] [Abstract] [Download PDF]

    Crosstalk cancellation provides a means of delivering binaural stimuli to a listener for psychoacoustic research which avoids many of the problems of using headphones in experiments. Using a highly sensitive discrimination paradigm, which addressed a variety of issues in previous, headphone-based experiments, this study aimed to determine whether a system using individual crosstalk cancellation filters can present binaural stimuli that are perceptually indistinguishable from a real sound source. The fast deconvolution with frequency-dependent regularization method was used to design crosstalk cancellation filters. The reproduction loudspeakers were positioned at 90 degrees azimuth, and the synthesized location was 0 degrees azimuth. Eight listeners were tested with noise, click trains, and pulsed tone stimuli. For the pulsed tone stimuli subjects were unable to discriminate between real and virtual sources. For the noise and click stimuli discrimination was marginally above chance, but well below the threshold of detection. That is, weak cues did exist but they were almost completely unreliable. The results suggest that this method of producing individualized crosstalk cancellation filters is suitable for binaural perceptual experiments.

    @Article{Moore2010,
    author = {A. H. Moore and A. I. Tew and R. Nicol},
    title = {An initial validation of individualized crosstalk cancellation filters for binaural perceptual experiments},
    journal = {Journal of the Audio Engineering Society},
    year = {2010},
    volume = {58},
    number = {1/2},
    pages = {36--45},
    abstract = {Crosstalk cancellation provides a means of delivering binaural stimuli to a listener for psychoacoustic research which avoids many of the problems of using headphones in experiments. Using a highly sensitive discrimination paradigm, which addressed a variety of issues in previous, headphone-based experiments, this study aimed to determine whether a system using individual crosstalk cancellation filters can present binaural stimuli that are perceptually indistinguishable from a real sound source. The fast deconvolution with frequency-dependent regularization method was used to design crosstalk cancellation filters. The reproduction loudspeakers were positioned at 90 degrees azimuth, and the synthesized location was 0 degrees azimuth. Eight listeners were tested with noise, click trains, and pulsed tone stimuli. For the pulsed tone stimuli subjects were unable to discriminate between real and virtual sources. For the noise and click stimuli discrimination was marginally above chance, but well below the threshold of detection. That is, weak cues did exist but they were almost completely unreliable. The results suggest that this method of producing individualized crosstalk cancellation filters is suitable for binaural perceptual experiments.},
    keywords = {audio, binaural, perceptual discrimination},
    url = {http://www.aes.org/e-lib/browse.cfm?elib=15240},
    }

  • A. Southern, “The synthesis and auralisation of physically modelled soundfields,” PhD Thesis, 2010.
    [BibTeX]
    @PhdThesis{Southern2010,
    author = {Alex Southern},
    title = {The synthesis and auralisation of physically modelled soundfields},
    school = {University of York},
    year = {2010},
    }

2009

  • H. Daffern, “Vibrato production and its impact on spectral energy in the performance of early music,” in National early music association, 2009.
    [BibTeX]
    @inproceedings{Daffern2009,
    title = "Vibrato production and its impact on spectral energy in the performance of early music",
    author = "H. Daffern",
    year = "2009",
    month = jul,
    day = "8",
    language = "English",
    booktitle = "National Early Music Association",
    }

  • D. M. Howard, J. S. Brereton, and H. Daffern, “Case study of voice quality differences in a soprano singing in different early music performance styles,” in Models and analysis of vocal emissions for biomedical applications – 6th international workshop, maveba 2009, 2009, p. 175–178.
    [BibTeX] [Abstract]

    This paper considers the characteristics of three differing styles of singing early music, as characterized by Richard Bethell [1] of the National Early Music Association, UK. In particular, the sung outputs from a postgraduate soprano who was practiced in singing all three styles are analysed along with the output from an electrolaryngograph which provides data on cycle-by-cycle fundamental variation as well as vocal fold contact area. The results are compared and contrasted with those from a group of early music and opera singers analysed previously.

    @inproceedings{Daffern2009a,
    title = "Case study of voice quality differences in a soprano singing in different early music performance styles",
    abstract = "This paper considers the characteristics of three differing styles of singing early music, as characterized by Richard Bethell [1] of the National Early Music Association, UK. In particular, the sung outputs from a postgraduate soprano who was practiced in singing all three styles are analysed along with the output from an electrolaryngograph which provides data on cycle-by-cycle fundamental variation as well as vocal fold contact area. The results are compared and contrasted with those from a group of early music and opera singers analysed previously.",
    keywords = "Closed quotient, Early music, Electrolaryngography, Opera, Singing, Voice acoustics, Voice analysis",
    author = "D. M. Howard and J. S. Brereton and H. Daffern",
    year = "2009",
    language = "English",
    isbn = "9788864530963",
    pages = "175--178",
    booktitle = "Models and Analysis of Vocal Emissions for Biomedical Applications - 6th International Workshop, MAVEBA 2009",
    publisher = "Firenze University Press",
    }

  • G. Siamantas, “An iterative, residual-based approach to unsupervised musical source separation in single-channel mixtures,” PhD Thesis, 2009.
    [BibTeX] [Download PDF]
    @PhdThesis{Siamantas2009,
    author = {Siamantas, Georgios},
    title = {{An iterative, residual-based approach to unsupervised musical source separation in single-channel mixtures}},
    school = {University of York},
    year = {2009},
    url = {http://etheses.whiterose.ac.uk/1504/},
    }

  • G. Spittle, “An investigation into improving speech intelligibility using binaural signal processing,” PhD Thesis, 2009.
    [BibTeX] [Download PDF]
    @PhdThesis{Spittle2009,
    author = {Gary Spittle},
    title = {{An investigation into improving speech intelligibility using binaural signal processing}},
    school = {University of York},
    year = {2009},
    url = {http://etheses.whiterose.ac.uk/1141/},
    }

  • J. B. Thorpe, “Human Sound Localisation Cues and their Relation to Morphology,” PhD Thesis, 2009.
    [BibTeX]
    @PhdThesis{Thorpe2009,
    author = {Thorpe, Jonathan B.},
    title = {{Human Sound Localisation Cues and their Relation to Morphology}},
    school = {University of York},
    year = {2009},
    }

  • A. Moore, “Towards the perception of externalised auditory images using binaural technology,” PhD Thesis, 2009.
    [BibTeX]
    @PhdThesis{Moore2009,
    author = {Alistair Moore},
    title = {Towards the perception of externalised auditory images using binaural technology},
    school = {University of York},
    year = {2009},
    }

2008

  • H. Daffern, “Distinguishing characteristics of vocal techniques in the specialist performance of early music,” PhD Thesis, 2008.
    [BibTeX] [Download PDF]
    @PhdThesis{Daffern2008,
    author = {Daffern, Helena},
    title = {{Distinguishing characteristics of vocal techniques in the specialist performance of early music}},
    school = {University of York},
    year = {2008},
    url = {http://etheses.whiterose.ac.uk/14118/},
    }

2007

  • D. M. Howard, H. Daffern, J. S. Brereton, G. F. Welch, E. Himonides, and A. W. Howard, “A real-time display system for singing development,” The journal of the acoustical society of america, vol. 121, iss. 5, 2007.
    [BibTeX]
    @article{Howard2007,
    title = "A Real-time Display System for Singing Development",
    author = "D. M. Howard and H. Daffern and J. S. Brereton and G. F. Welch and E. Himonides and A. W. Howard",
    year = "2007",
    month = jun,
    day = "5",
    language = "English",
    volume = "121",
    journal = "The Journal of the Acoustical Society of America",
    issn = "0001-4966",
    publisher = "Acoustical Society of America",
    number = "5",
    }

  • S. Pauletto, “Interactive non-speech auditory display of multivariate data,” PhD Thesis, 2007.
    [BibTeX] [Download PDF]
    @PhdThesis{Pauletto2007,
    author = {Pauletto, Sandra},
    title = {{Interactive non-speech auditory display of multivariate data}},
    school = {University of York},
    year = {2007},
    url = {http://etheses.whiterose.ac.uk/14192/},
    }

  • S. B. Shelley, “Diffuse Boundary Modelling in the Digital Waveguide Mesh,” PhD Thesis, 2007.
    [BibTeX] [Download PDF]
    @PhdThesis{Shelley2007,
    author = {Shelley, Simon Benedict},
    title = {{Diffuse Boundary Modelling in the Digital Waveguide Mesh}},
    school = {University of York},
    year = {2007},
    url = {https://www-users.york.ac.uk/~dtm3/Download/SBS_Thesis.pdf},
    }

2006

  • H. Daffern, D. M. Howard, and P. Seymour, “Pilot study of the effects of period instruments on the voice.” 2006, p. 43.
    [BibTeX]
    @Conference{Daffern2006,
    author = {H. Daffern and D. M. Howard and P. Seymour},
    title = {Pilot study of the effects of period instruments on the voice},
    year = {2006},
    pages = {43},
    month = may,
    day = {1},
    language = {Undefined/Unknown},
    }

  • M. R. Every, “Separation of musical sources and structure from single-channel polyphonic recordings,” PhD Thesis, 2006.
    [BibTeX] [Download PDF]
    @PhdThesis{Every2006,
    author = {Every, Mark Roberts},
    title = {{Separation of musical sources and structure from single-channel polyphonic recordings}},
    school = {University of York},
    year = {2006},
    url = {http://etheses.whiterose.ac.uk/9883/},
    }

  • The 3rd International Physiology and Acoustics of Singing Conference, D. M. Howard, J. S. Brereton, and H. Daffern, Eds., School of Arts and Humanities, Institute of Education, 2006.
    [BibTeX]
    @Book{Howard2006,
    title = {{The 3rd International Physiology and Acoustics of Singing Conference}},
    publisher = {School of Arts and Humanities, Institute of Education},
    year = {2006},
    editor = {D. M. Howard and J. S. Brereton and H. Daffern},
    isbn = {1-905351-04-6},
    language = {English},
    }

  • J. J. Wells, “Real-time spectral modelling of audio for creative sound transformation,” PhD Thesis, 2006.
    [BibTeX] [Download PDF]
    @PhdThesis{Wells2006,
    author = {Wells, Jeremy J.},
    title = {{Real-time spectral modelling of audio for creative sound transformation}},
    school = {University of York},
    year = {2006},
    url = {http://etheses.whiterose.ac.uk/14084/},
    }

  • J. Mullen, “Physical Modelling of the Vocal Tract with the 2D Digital Waveguide Mesh,” PhD Thesis, 2006.
    [BibTeX] [Download PDF]
    @PhdThesis{Mullen2006,
    author = {Jack Mullen},
    title = {{Physical Modelling of the Vocal Tract with the 2D Digital Waveguide Mesh}},
    school = {University of York},
    year = {2006},
    url = {https://www-users.york.ac.uk/~dtm3/Download/JackThesis.pdf},
    }

2004

  • C. Hetherington, “HRTF estimation by shape parameterization of the human head and pinnae,” PhD Thesis, 2004.
    [BibTeX]
    @PhdThesis{Hetherington2004,
    author = {Carl Hetherington},
    title = {{HRTF estimation by shape parameterization of the human head and pinnae}},
    school = {University of York},
    year = {2004},
    }

2003

  • A. Hunt, M. M. Wanderley, and M. Paradis, “The importance of parameter mapping in electronic instrument design,” Journal of new music research, vol. 32, iss. 4, p. 429–440, 2003.
    [BibTeX]
    @Article{Hunt2003,
    author = {A. Hunt and M. M. Wanderley and M. Paradis},
    title = {The importance of parameter mapping in electronic instrument design},
    journal = {Journal of New Music Research},
    year = {2003},
    volume = {32},
    number = {4},
    pages = {429--440},
    publisher = {Taylor \& Francis},
    }