Publication List

2019

  • S. D’Amario, H. Daffern, and F. Bailes, “Perception of synchronization in singing ensembles,” Plos one, vol. 14, iss. 6, 2019. doi:10.1371/journal.pone.0218162
    [BibTeX] [Download PDF]
    @article{DAmario2019,
    author = {S. D'Amario and H. Daffern and F. Bailes},
    title = {{Perception of synchronization in singing ensembles}},
    journal = {PLoS ONE},
    year = {2019},
    volume = {14},
    number = {6},
    url = {https://doi.org/10.1371/journal.pone.0218162},
    pages = {e0218162},
    doi = {10.1371/journal.pone.0218162},
    }

  • C. Görres and D. Chesmore, “Active sound production of scarab beetle larvae opens up new possibilities for species-specific pest monitoring in soils,” Scientific reports, vol. 9, iss. 1, p. 10115, 2019. doi:10.1038/s41598-019-46121-y
    [BibTeX] [Download PDF]
    @article{Gorres2019,
    Author = {G\"{o}rres, Carolyn-Monika and Chesmore, David},
    Doi = {10.1038/s41598-019-46121-y},
    Issn = {2045-2322},
    Journal = {Scientific Reports},
    Number = {1},
    Pages = {10115},
    Title = {Active sound production of scarab beetle larvae opens up new possibilities for species-specific pest monitoring in soils},
    Url = {https://doi.org/10.1038/s41598-019-46121-y},
    Volume = {9},
    Year = {2019}
    }

  • D. Johnston, H. Egermann, and G. Kearney, “An interactive spatial audio experience for children with autism spectrum disorder,” in Audio engineering society conference: 2019 aes international conference on immersive and interactive audio, 2019.
    [BibTeX]
    @inproceedings{johnston2019interactive,
    title={An Interactive Spatial Audio Experience for Children with Autism Spectrum Disorder},
    author={Johnston, Daniel and Egermann, Hauke and Kearney, Gavin},
    booktitle={Audio Engineering Society Conference: 2019 AES International Conference on Immersive and Interactive Audio},
    year={2019},
    organization={Audio Engineering Society}
    }

  • D. Johnston, H. Egermann, and G. Kearney, “Measuring the behavioral response to spatial audio within a multi-modal virtual reality environment in children with autism spectrum disorder,” Applied sciences, vol. 9, iss. 15, 2019. doi:10.3390/app9153152
    [BibTeX] [Download PDF]
    @Article{app9153152,
    AUTHOR = {Johnston, Daniel and Egermann, Hauke and Kearney, Gavin},
    TITLE = {Measuring the Behavioral Response to Spatial Audio within a Multi-Modal Virtual Reality Environment in Children with Autism Spectrum Disorder},
    JOURNAL = {Applied Sciences},
    VOLUME = {9},
    YEAR = {2019},
    NUMBER = {15},
    ARTICLE-NUMBER = {3152},
    URL = {https://www.mdpi.com/2076-3417/9/15/3152},
    ISSN = {2076-3417},
    DOI = {10.3390/app9153152}
    }

  • T. McKenzie, D. T. Murphy, and G. Kearney, “Interaural Level Difference Optimisation of First-Order Binaural Ambisonic Rendering,” in Aes conference on immersive and interactive audio, York, UK, 2019.
    [BibTeX]
    @inproceedings{McKenzie2019,
    address = {York, UK},
    author = {T. McKenzie and D. T. Murphy and G. Kearney},
    booktitle = {AES Conference on Immersive and Interactive Audio},
    title = {{Interaural Level Difference Optimisation of First-Order Binaural Ambisonic Rendering}},
    year = {2019}
    }

  • T. Rudzki, I. Gomez-Lanzaco, J. Stubbs, J. Skoglund, D. T. Murphy, and G. Kearney, “Auditory localization in low-bitrate compressed ambisonic scenes,” Applied sciences, vol. 9, iss. 13, 2019. doi:10.3390/app9132618
    [BibTeX] [Download PDF]
    @Article{app9132618,
    AUTHOR = {T. Rudzki and I. Gomez-Lanzaco and J. Stubbs and J. Skoglund and D. T. Murphy and G. Kearney},
    TITLE = {Auditory Localization in Low-Bitrate Compressed Ambisonic Scenes},
    JOURNAL = {Applied Sciences},
    VOLUME = {9},
    YEAR = {2019},
    NUMBER = {13},
    ARTICLE-NUMBER = {2618},
    URL = {https://www.mdpi.com/2076-3417/9/13/2618},
    ISSN = {2076-3417},
    DOI = {10.3390/app9132618}
    }

  • T. Rudzki, P. Hening, I. Gomez-Lanzaco, J. Stubbs, T. McKenzie, J. Skoglund, D. Murphy, and G. Kearney, “Perceptual evaluation of bitrate compressed ambisonic scenes in loudspeaker based reproduction,” in Audio engineering society international conference on immersive and interactive audio, york, uk, 2019.
    [BibTeX]
    @conference{rudzki2019pesacls,
    title = {Perceptual Evaluation of Bitrate Compressed Ambisonic Scenes in Loudspeaker Based Reproduction},
    author = {Rudzki, Tomasz and Hening, Pierce and Gomez-Lanzaco, Ignacio and Stubbs, Jessica and McKenzie, Thomas and Skoglund, Jan and Murphy, Damian and Kearney, Gavin},
    booktitle = {Audio Engineering Society International Conference on Immersive and Interactive Audio, York, UK},
    month = {Mar},
    year = {2019},
    }

  • A. Southern, D. T. Murphy, and L. Savioja, “Boundary absorption approximation in the spatial high-frequency extrapolation method for parametric room impulse response synthesis,” The journal of the acoustical society of america, vol. 145, iss. 4, p. 2770–2782, 2019. doi:10.1121/1.5096162
    [BibTeX]
    @article{Southern2019,
    author = {Alex Southern and Damian T. Murphy and Lauri Savioja},
    title = {Boundary absorption approximation in the spatial high-frequency extrapolation method for parametric room impulse response synthesis},
    journal = {The Journal of the Acoustical Society of America},
    year = {2019},
    volume = {145},
    number = {4},
    pages = {2770--2782},
    month = {apr},
    doi = {10.1121/1.5096162},
    publisher = {Acoustical Society of America ({ASA})}}

  • K. Young, C. Armstrong, A. I. Tew, D. T. Murphy, and G. Kearney, “A Numerical Study into Perceptually-Weighted Spectral Differences between Differently-Spaced HRTFs,” in Audio engineering society international conference on immersive and interactive audio, York, 2019.
    [BibTeX] [Download PDF]
    @inproceedings{Young2019,
    address = {York},
    author = {K. Young and C. Armstrong and A. I. Tew and D. T. Murphy and G. Kearney},
    booktitle = {Audio Engineering Society International Conference on Immersive and Interactive Audio},
    publisher = {Audio Engineering Society},
    title = {{A Numerical Study into Perceptually-Weighted Spectral Differences between Differently-Spaced HRTFs}},
    url = {http://www.aes.org/e-lib/browse.cfm?elib=20386},
    year = {2019}
    }

2018

  • C. Armstrong, L. Thresh, D. T. Murphy, and G. Kearney, “A Perceptual Evaluation of Individual and Non-Individual HRTFs: A Case Study of the SADIE II Database,” Applied sciences, vol. 8, iss. 11, 2018. doi:10.3390/app8112029
    [BibTeX]
    @Article{Armstrong2018,
    author = {C. Armstrong and L. Thresh and D. T. Murphy and G. Kearney},
    title = {{A Perceptual Evaluation of Individual and Non-Individual HRTFs: A Case Study of the SADIE II Database}},
    journal = {Applied Sciences},
    year = {2018},
    volume = {8},
    number = {11},
    doi = {10.3390/app8112029},
    keywords = {binaural,database,evaluation,hrtf,measurement,perception,spatial audio,timbre},
    publisher = {MDPI},
    }

  • C. Armstrong, D. T. Murphy, and G. Kearney, “A Bi-RADIAL Approach to Ambisonics,” in Aes international conference on audio for virtual and augmented reality, 2018.
    [BibTeX] [Download PDF]
    @InProceedings{Armstrong2018a,
    author = {C. Armstrong and D. T. Murphy and G. Kearney},
    title = {{A Bi-RADIAL Approach to Ambisonics}},
    booktitle = {AES International Conference on Audio for Virtual and Augmented Reality},
    year = {2018},
    url = {http://www.aes.org/e-lib/browse.cfm?elib=19690},
    }

  • C. Armstrong, T. McKenzie, D. T. Murphy, and G. Kearney, “A Perceptual Spectral Difference Model for Binaural Signals,” in Aes 145th convention, 2018.
    [BibTeX] [Download PDF]
    @InProceedings{Armstrong2018b,
    author = {C. Armstrong and T. McKenzie and D. T. Murphy and G. Kearney},
    title = {{A Perceptual Spectral Difference Model for Binaural Signals}},
    booktitle = {AES 145th Convention},
    year = {2018},
    url = {http://www.aes.org/e-lib/browse.cfm?elib=19722},
    }

  • H. Daffern, D. A. Camlin, H. Egermann, A. J. Gully, G. Kearney, C. Neale, and J. Rees-Jones, “Exploring the potential of virtual reality technology to investigate the health and well being benefits of group singing,” International journal of performance arts and digital media, p. 1–22, 2018.
    [BibTeX]
    @article{Daffern2018,
    title={Exploring the potential of virtual reality technology to investigate the health and well being benefits of group singing},
    author={H. Daffern and D. A. Camlin and H. Egermann and A. J. Gully and G. Kearney and C. Neale and J. Rees-Jones},
    journal={International Journal of Performance Arts and Digital Media},
    pages={1--22},
    year={2018},
    publisher={Taylor \& Francis}
    }

  • S. D’Amario, D. M. Howard, H. Daffern, and N. Pennill, “A longitudinal study of intonation in an a cappella singing quintet,” Journal of voice, 2018.
    [BibTeX]
    @article{d2018,
    title={A Longitudinal Study of Intonation in an a cappella Singing Quintet},
    author={S. D'Amario and D. M. Howard and H. Daffern and N. Pennill},
    journal={Journal of Voice},
    year={2018},
    publisher={Elsevier}
    }

  • S. D’Amario, H. Daffern, and F. Bailes, “A longitudinal study investigating synchronization in a singing quintet,” Journal of voice, 2018.
    [BibTeX]
    @article{d2018a,
    title={A longitudinal study investigating synchronization in a singing quintet},
    author={S. D'Amario and H. Daffern and F. Bailes},
    journal={Journal of Voice},
    year={2018},
    publisher={Elsevier}
    }

  • S. D’Amario, H. Daffern, and F. Bailes, “Synchronization in singing duo performances: the roles of visual contact and leadership instruction,” Frontiers in psychology, vol. 9, 2018.
    [BibTeX]
    @article{d2018b,
    title={Synchronization in singing duo performances: The roles of visual contact and leadership instruction},
    author={S. D'Amario and H. Daffern and F. Bailes},
    journal={Frontiers in psychology},
    volume={9},
    year={2018},
    publisher={Frontiers Media SA}
    }

  • S. D’Amario, H. Daffern, and F. Bailes, “A new method of onset and offset detection in ensemble singing,” Logopedics phoniatrics vocology, p. 1–16, 2018.
    [BibTeX]
    @article{d2018c,
    title={A new method of onset and offset detection in ensemble singing},
    author={S. D'Amario and H. Daffern and F. Bailes},
    journal={Logopedics Phoniatrics Vocology},
    pages={1--16},
    year={2018},
    publisher={Taylor \& Francis}
    }

  • A. J. Gully, H. Daffern, and D. T. Murphy, “Diphthong synthesis using the dynamic 3d digital waveguide mesh,” Ieee/acm transactions on audio, speech, and language processing, vol. 26, iss. 2, p. 243–255, 2018.
    [BibTeX]
    @article{gully2018,
    title={Diphthong synthesis using the dynamic 3d digital waveguide mesh},
    author={A. J. Gully and H. Daffern and D. T. Murphy},
    journal={IEEE/ACM Transactions on Audio, Speech, and Language Processing},
    volume={26},
    number={2},
    pages={243--255},
    year={2018},
    publisher={IEEE}
    }

  • D. Johnston, H. Egermann, and G. Kearney, “Innovative computer technology in music based interventions for individuals with autism moving beyond traditional interactive music therapy techniques,” Cogent psychology, p. 1554773, 2018.
    [BibTeX]
    @Article{Johnston2018,
    author = {Johnston, Daniel and Egermann, Hauke and Kearney, Gavin},
    title = {Innovative computer technology in music based interventions for individuals with autism moving beyond traditional interactive music therapy techniques},
    journal = {Cogent Psychology},
    year = {2018},
    pages = {1554773},
    publisher = {Cogent OA},
    }

  • D. Johnston, H. Egermann, and G. Kearney, “SoundFields: A mixed reality spatial audio game for children with autism spectrum disorder,” in Audio engineering society convention 145, 2018.
    [BibTeX]
    @InProceedings{Johnston2018a,
    author = {Johnston, Daniel and Egermann, Hauke and Kearney, Gavin},
    title = {Sound{F}ields: {A} mixed reality spatial audio game for children with autism spectrum disorder},
    booktitle = {Audio Engineering Society Convention 145},
    year = {2018},
    organization = {Audio Engineering Society},
    }

  • M. J. Lopez, G. Kearney, and K. Hofstadter, “Audio description in the UK: what works, what doesn’t and understanding the need for personalising access,” British journal of visual impairment, p. 1–18, 2018. doi:10.1177/0264619618794750
    [BibTeX] [Abstract]

    Audio Description for film and television is a pre-recorded track that uses verbal descriptions to provide information on visual aspects of a film or TV programme. In the UK it is currently the only accessibility strategy available for visually impaired audiences and although it provides access to a large number of people, its shortcomings also fail to engage others in audiovisual experiences. The Enhancing Audio Description project explores how digital audio technologies can be applied to the creation of alternatives to Audio Description with the aim of personalising access strategies. Such personalisation would allow users to select the method utilised to access audiovisual experiences, by having choices that include traditional forms of accessibility as well as sound design based methods. The present article analyses the results of a survey and focus groups in which visually impaired participants discussed the advantages and disadvantages of AD and it demonstrates not only the diversity of experiences and needs of visually impaired groups but also their eagerness for change.

    @Article{Lopez2018,
    author = {M. J. Lopez and G. Kearney and K. Hofstadter},
    title = {Audio description in the {U}{K}: what works, what doesn't and understanding the need for personalising access},
    journal = {British Journal of Visual Impairment},
    year = {2018},
    pages = {1--18},
    month = aug,
    issn = {0264-6196},
    abstract = {Audio Description for film and television is a pre-recorded track that uses verbal descriptions to provide information on visual aspects of a film or TV programme. In the UK it is currently the only accessibility strategy available for visually impaired audiences and although it provides access to a large number of people, its shortcomings also fail to engage others in audiovisual experiences. The Enhancing Audio Description project explores how digital audio technologies can be applied to the creation of alternatives to Audio Description with the aim of personalising access strategies. Such personalisation would allow users to select the method utilised to access audiovisual experiences, by having choices that include traditional forms of accessibility as well as sound design based methods. The present article analyses the results of a survey and focus groups in which visually impaired participants discussed the advantages and disadvantages of AD and it demonstrates not only the diversity of experiences and needs of visually impaired groups but also their eagerness for change.},
    day = {27},
    doi = {10.1177/0264619618794750},
    keywords = {Sound design, Accessibility, Film, Visual impairment},
    }

  • T. McKenzie, D. Murphy, and G. Kearney, “Diffuse-field equalisation of binaural ambisonic rendering,” Applied sciences, vol. 8, iss. 10, 2018. doi:10.3390/app8101956
    [BibTeX]
    @Article{McKenzie2018,
    author = {McKenzie, Thomas and Murphy, Damian and Kearney, Gavin},
    title = {Diffuse-field equalisation of binaural ambisonic rendering},
    journal = {Applied Sciences},
    year = {2018},
    volume = {8},
    number = {10},
    issn = {2076-3417},
    doi = {10.3390/app8101956},
    }

  • T. McKenzie, D. Murphy, and G. Kearney, “Directional bias equalisation of first-order binaural ambisonic rendering,” in Aes conference on audio for virtual and augmented reality, Redmond, 2018.
    [BibTeX]
    @InProceedings{McKenzie2018a,
    author = {McKenzie, Thomas and Murphy, Damian and Kearney, Gavin},
    title = {Directional bias equalisation of first-order binaural ambisonic rendering},
    booktitle = {AES Conference on Audio for Virtual and Augmented Reality},
    year = {2018},
    address = {Redmond},
    }

  • D. R. Méndez, C. Armstrong, J. Stubbs, M. Stiles, and G. Kearney, “Practical Recording Techniques for Music Production with Six-Degrees of Freedom Virtual Reality,” in Aes 145th convention, 2018.
    [BibTeX] [Download PDF]
    @InProceedings{Mendez2018,
    author = {M{\'{e}}ndez, David R. and Armstrong, Cal and Stubbs, Jessica and Stiles, Mirek and Kearney, Gavin},
    title = {{Practical Recording Techniques for Music Production with Six-Degrees of Freedom Virtual Reality}},
    booktitle = {AES 145th Convention},
    year = {2018},
    url = {http://www.aes.org/e-lib/browse.cfm?elib=19729},
    }

  • R. Rudnicki and J. Brereton, “Sonicules – designing drugs with sound: approaches to sound design for film, audiovisual performance and interactive sonification,” in Soundings: Documentary film and the listening experience, G. Wall, Ed., University of Huddersfield, 2018.
    [BibTeX]
    @incollection{Rudnicki2018,
    title = "Sonicules - Designing drugs with sound: approaches to sound
    design for film, audiovisual performance and interactive
    sonification",
    booktitle = "Soundings: {D}ocumentary Film and the Listening Experience",
    author = "Rudnicki, R and Brereton, J",
    editor = "Wall, Geoffrey",
    publisher = "University of Huddersfield",
    year = 2018
    }

  • T. Rudzki, D. Murphy, and G. Kearney, “A daw-based interactive tool for perceptual spatial audio evaluation,” in Audio engineering society convention 145, 2018.
    [BibTeX] [Download PDF]
    @conference{rudzki2018a,
    title = {A DAW-Based Interactive Tool for Perceptual Spatial Audio Evaluation},
    author = {Rudzki, Tomasz and Murphy, Damian and Kearney, Gavin},
    booktitle = {Audio Engineering Society Convention 145},
    month = {Oct},
    year = {2018},
    url = {http://www.aes.org/e-lib/browse.cfm?elib=19730}
    }

  • F. Stevens, D. T. Murphy, and S. L. Smith, “Soundscape auralisation and visualisation: A cross-modal approach to soundscape evaluation,” in Dafx 2018, Aveiro, Portugal, 2018.
    [BibTeX] [Download PDF]
    @InProceedings{Stevens2018,
    author = {F. Stevens and D. T. Murphy and S. L. Smith},
    title = {Soundscape auralisation and visualisation: {A} cross-modal approach to soundscape evaluation},
    booktitle = {DAFx 2018},
    year = {2018},
    address = {Aveiro, Portugal},
    month = sep,
    url = {http://dafx2018.web.ua.pt/papers/DAFx2018_paper_3.pdf},
    }

  • F. Stevens, “Strategies for environmental sound measurement, modelling, and evaluation,” PhD Thesis, 2018.
    [BibTeX] [Download PDF]
    @phdthesis{stevens2018strategies,
    title={Strategies for Environmental Sound Measurement, Modelling, and Evaluation},
    author={Stevens, Francis},
    year={2018},
    school={University of York},
    url = {http://etheses.whiterose.ac.uk/22661/}
    }

  • S. Ternström, S. D’Amario, and A. Selamtzis, “Effects of the Lung Volume on the Electroglottographic Waveform in Trained Female Singers,” The journal of voice, 2018. doi:10.1016/j.jvoice.2018.09.006
    [BibTeX] [Download PDF]
    @article{Ternstrom2018,
    author = {S. Ternstr\"{o}m and S. D'Amario and A. Selamtzis},
    title = {{Effects of the Lung Volume on the Electroglottographic Waveform in Trained Female Singers}},
    journal = {The Journal of Voice},
    year = {2018},
    url = {https://doi.org/10.1016/j.jvoice.2018.09.006},
    doi = {10.1016/j.jvoice.2018.09.006},
    }

  • B. Tsui and G. Kearney, “A head-related transfer function database consolidation tool for high variance machine learning algorithms,” in Audio engineering society convention 145, 2018.
    [BibTeX] [Download PDF]
    @InProceedings{Tsui2018,
    author = {Tsui, Benjamin and Kearney, Gavin},
    title = {A head-related transfer function database consolidation tool for high variance machine learning algorithms},
    booktitle = {Audio Engineering Society Convention 145},
    year = {2018},
    month = oct,
    url = {http://www.aes.org/e-lib/browse.cfm?elib=19716},
    }

  • R. R. Vos, D. T. Murphy, D. M. Howard, and H. Daffern, “Determining the relevant criteria for three-dimensional vocal tract characterization,” Journal of voice, vol. 32, iss. 2, p. 130–142, 2018.
    [BibTeX]
    @article{Vos2018,
    title={Determining the relevant criteria for three-dimensional vocal tract characterization},
    author={R. R. Vos and D. T. Murphy and D. M. Howard and H. Daffern},
    journal={Journal of Voice},
    volume={32},
    number={2},
    pages={130--142},
    year={2018},
    publisher={Elsevier}
    }

  • R. R. Vos, D. T. Murphy, D. M. Howard, and H. Daffern, “The perception of formant tuning in soprano voices,” Journal of voice, vol. 32, iss. 1, p. 126.e1, 2018.
    [BibTeX]
    @article{Vos2018b,
    title={The Perception of Formant Tuning in Soprano Voices},
    author={R. R. Vos and D. T. Murphy and D. M. Howard and H. Daffern},
    journal={Journal of Voice},
    volume={32},
    number={1},
    pages={126.e1},
    year={2018},
    publisher={Elsevier}
    }

  • K. Young, G. Kearney, and A. I. Tew, “Acoustic validation of a BEM-suitable mesh model of KEMAR,” in 2018 aes international conference on spatial reproduction – aesthetics and science, august 6 – 9, 2018, tokyo, japan, 2018.
    [BibTeX] [Download PDF]
    @InProceedings{Young2018,
    author = {K. Young and G. Kearney and A. I. Tew},
    title = {Acoustic validation of a {BEM}-suitable mesh model of {KEMAR}},
    booktitle = {2018 AES International Conference on Spatial Reproduction - Aesthetics and Science, August 6 - 9, 2018, Tokyo, Japan},
    year = {2018},
    month = jul,
    publisher = {Audio Engineering Society},
    day = {30},
    url = {http://www.aes.org/e-lib/browse.cfm?elib=19662},
    }

  • K. Young, G. Kearney, and A. I. Tew, “Loudspeaker positions with sufficient natural channel separation for binaural reproduction,” in 2018 aes international conference on spatial reproduction – aesthetics and science, august 6 – 9, 2018, tokyo, japan, 2018.
    [BibTeX] [Download PDF]
    @InProceedings{Young2018a,
    author = {K. Young and G. Kearney and A. I. Tew},
    title = {Loudspeaker positions with sufficient natural channel separation for binaural reproduction},
    booktitle = {2018 AES International Conference on Spatial Reproduction - Aesthetics and Science, August 6 - 9, 2018, Tokyo, Japan},
    year = {2018},
    month = jul,
    publisher = {Audio Engineering Society},
    day = {30},
    url = {http://www.aes.org/e-lib/browse.cfm?elib=19649},
    }

  • K. Young, M. Lovedee-Turner, J. Brereton, and H. Daffern, “The Impact of Gender on Conference Authorship in Audio Engineering: Analysis Using a New Data Collection Method,” Ieee transactions on education, vol. 61, iss. 4, p. 328–335, 2018. doi:10.1109/TE.2018.2814613
    [BibTeX]
    @article{Young2018b,
    author = {K. Young and M. {Lovedee-Turner} and J. Brereton and H. Daffern},
    doi = {10.1109/TE.2018.2814613},
    journal = {IEEE Transactions on Education},
    number = {4},
    pages = {328--335},
    title = {{The Impact of Gender on Conference Authorship in Audio Engineering: Analysis Using a New Data Collection Method}},
    volume = {61},
    year = {2018}
    }

2017

  • C. Armstrong, A. Chadwick, L. Thresh, D. T. Murphy, and G. Kearney, “Simultaneous HRTF Measurement of Multiple Source Configurations Utilizing Semi-Permanent Structural Mounts,” in Aes 143rd convention, 2017.
    [BibTeX] [Download PDF]
    @InProceedings{Armstrong2017,
    author = {C. Armstrong and A. Chadwick and L. Thresh and D. T. Murphy and G. Kearney},
    title = {{Simultaneous HRTF Measurement of Multiple Source Configurations Utilizing Semi-Permanent Structural Mounts}},
    booktitle = {AES 143rd Convention},
    year = {2017},
    url = {http://www.aes.org/e-lib/browse.cfm?elib=19311},
    }

  • B. Baruah, T. Ward, and J. Brereton, “An e-learning tool for reflective practice and enhancing employability among engineering students,” in European association for education in electrical and information engineering (EAEEIE) annual conference, 2017.
    [BibTeX]
    @INPROCEEDINGS{Baruah2017,
    title = "An e-learning tool for reflective practice and enhancing
    employability among engineering students",
    booktitle = "European Association for Education in Electrical and
    Information Engineering ({EAEEIE}) Annual Conference",
    author = "Baruah, Bidyut and Ward, Tony and Brereton, Jude",
    year = "2017",
    location = "Grenoble, France"
    }

  • J. Brereton, “Music perception and performance in virtual acoustic spaces,” in Body, sound and space in music and beyond: multimodal explorations, C. Wöllner, Ed., Routledge, 2017.
    [BibTeX]
    @INCOLLECTION{Brereton2017,
    title = "Music perception and performance in virtual acoustic spaces",
    booktitle = "Body, Sound and Space in Music and Beyond: Multimodal
    Explorations",
    author = "Brereton, J",
    editor = "W{\"o}llner, Clemens",
    publisher = "Routledge",
    series = "SEMPRE Studies in The Psychology of Music",
    year = "2017"
    }

  • K. Brown, M. Paradis, and D. T. Murphy, “OpenAirLib: A JavaScript library for the acoustics of spaces,” in Audio engineering society convention 142, 2017.
    [BibTeX]
    @inproceedings{Brown2017,
    title={Open{A}ir{L}ib: {A} {J}ava{S}cript Library for the Acoustics of Spaces},
    author={K. Brown and M. Paradis and D. T. Murphy},
    booktitle={Audio Engineering Society Convention 142},
    year={2017},
    organization={Audio Engineering Society}
    }

  • J. G. Burton, D. T. Murphy, and J. S. Brereton, “Perception of low frequency content of amplified music in arenas and Open-Air music festivals,” in Audio engineering society conference: 2017 AES international conference on sound Reinforcement–Open air venues, 2017.
    [BibTeX]
    @INPROCEEDINGS{Burton2017-on,
    title = "Perception of Low Frequency Content of Amplified Music in
    Arenas and {Open-Air} Music Festivals",
    booktitle = "Audio Engineering Society Conference: 2017 {AES} International
    Conference on Sound {Reinforcement--Open} Air Venues",
    author = "Burton, Jonathan G and Murphy, Damian T and Brereton, Jude S",
    publisher = "Audio Engineering Society",
    institution = "Audio Engineering Society",
    year = "2017",
    language = "en"
    }

  • H. Daffern, “Blend in singing ensemble performance: vibrato production in a vocal quartet,” Journal of voice, vol. 31, iss. 3, p. 385.e23 – 385.e29, 2017. doi:10.1016/j.jvoice.2016.09.007
    [BibTeX] [Download PDF]
    @article{daffern2017,
    title = "Blend in Singing Ensemble Performance: Vibrato Production in a Vocal Quartet",
    journal = "Journal of Voice",
    volume = "31",
    number = "3",
    pages = "385.e23 - 385.e29",
    year = "2017",
    issn = "0892-1997",
    doi = "https://doi.org/10.1016/j.jvoice.2016.09.007",
    url = "http://www.sciencedirect.com/science/article/pii/S0892199716302156",
    author = "H. Daffern",
    }

  • S. D’Amario and H. Daffern, “Using electrolaryngography and electroglottography to assess the singing voice: a systematic review,” Psychomusicology: music, mind, and brain, vol. 27, iss. 4, p. 229, 2017.
    [BibTeX]
    @article{d2017using,
    title={Using electrolaryngography and electroglottography to assess the singing voice: A systematic review},
    author={S. D'Amario and H. Daffern},
    journal={Psychomusicology: Music, Mind, and Brain},
    volume={27},
    number={4},
    pages={229},
    year={2017},
    publisher={Educational Publishing Foundation}
    }

  • M. C. Green and D. T. Murphy, “Acoustic scene classification using spatial features,” Proceedings of the detection and classification of acoustic scenes and events, Munich, Germany, p. 16–17, 2017.
    [BibTeX]
    @article{Green2017,
    title={Acoustic Scene Classification Using Spatial Features},
    author={M. C. Green and D. T. Murphy},
    journal={Proceedings of the Detection and Classification of Acoustic Scenes and Events, {M}unich, {G}ermany},
    pages={16--17},
    year={2017}
    }

  • M. C. Green and D. T. Murphy, “Eigenscape: A database of spatial acoustic scene recordings,” Applied sciences, vol. 7, iss. 11, p. 1204, 2017.
    [BibTeX]
    @article{Green2017a,
    title={EigenScape: {A} Database of Spatial Acoustic Scene Recordings},
    author={M. C. Green and D. T. Murphy},
    journal={Applied Sciences},
    volume={7},
    number={11},
    pages={1204},
    year={2017},
    publisher={Multidisciplinary Digital Publishing Institute}
    }

  • T. McKenzie, D. Murphy, and G. Kearney, “Assessing the Authenticity of the KEMAR Mouth Simulator as a Repeatable Speech Source,” in 143rd convention of the audio engineering society, New York, 2017.
    [BibTeX]
    @InProceedings{McKenzie2017,
    author = {McKenzie, Thomas and Murphy, Damian and Kearney, Gavin},
    title = {{Assessing the Authenticity of the KEMAR Mouth Simulator as a Repeatable Speech Source}},
    booktitle = {143rd Convention of the Audio Engineering Society},
    year = {2017},
    address = {New York},
    }

  • T. McKenzie, G. Kearney, and D. Murphy, “Diffuse-Field Equalisation of First Order Ambisonics,” in 20th international conference on digital audio effects (dafx-17), Edinburgh, 2017.
    [BibTeX] [Download PDF]
    @InProceedings{Mckenzie2017,
    author = {McKenzie, Thomas and Kearney, Gavin and Murphy, Damian},
    title = {{Diffuse-Field Equalisation of First Order Ambisonics}},
    booktitle = {20th International Conference on Digital Audio Effects (DAFx-17)},
    year = {2017},
    address = {Edinburgh},
    url = {http://www.dafx17.eca.ed.ac.uk/papers/DAFx17_paper_31.pdf},
    }

  • D. T. Murphy, S. Shelley, A. Foteinou, J. Brereton, and H. Daffern, “Acoustic heritage and audio creativity: the creative application of sound in the representation, understanding and experience of past environments,” Internet archaeology, 2017.
    [BibTeX]
    @article{Murphy2017,
    title={Acoustic Heritage and Audio Creativity: the Creative Application of Sound in the Representation, Understanding and Experience of Past Environments},
    author={D. T. Murphy and S. Shelley and A. Foteinou and J. Brereton and H. Daffern},
    journal={Internet Archaeology},
    year={2017},
    publisher={Council for British Archaeology}
    }

  • H. Riaz, M. Stiles, C. Armstrong, H. Lee, and G. Kearney, “Multichannel Microphone Array Recording for Popular Music Production in Virtual Reality,” in Aes 143rd convention, 2017.
    [BibTeX] [Download PDF]
    @InProceedings{Riaz2017,
    author = {Riaz, Hashim and Stiles, Mirek and Armstrong, Cal and Lee, Hyunkook and Kearney, Gavin},
    title = {{Multichannel Microphone Array Recording for Popular Music Production in Virtual Reality}},
    booktitle = {AES 143rd Convention},
    year = {2017},
    url = {http://www.aes.org/e-lib/browse.cfm?elib=19333},
    }

  • A. Southern, F. Stevens, and D. T. Murphy, “Sounding out smart cities: Auralization and soundscape monitoring for environmental sound design,” The journal of the acoustical society of america, vol. 141, iss. 5, p. 3880, 2017. doi:10.1121/1.4988686
    [BibTeX]
    @Article{Southern2017,
    author = {A. Southern and F. Stevens and D. T. Murphy},
    title = {Sounding out smart cities: {A}uralization and soundscape monitoring for environmental sound design},
    journal = {The Journal of the Acoustical Society of America},
    year = {2017},
    volume = {141},
    number = {5},
    pages = {3880},
    doi = {10.1121/1.4988686},
    }

  • F. Stevens, D. T. Murphy, and S. L. Smith, “Ecological validity of stereo UHJ soundscape reproduction,” in Proceedings of the 142nd audio engineering society (aes) convention, Berlin, Germany, 2017.
    [BibTeX] [Download PDF]
    @InProceedings{Stevens2017,
    author = {F. Stevens and D. T. Murphy and S. L. Smith},
    title = {Ecological validity of stereo {UHJ} soundscape reproduction},
    booktitle = {Proceedings of the 142nd Audio Engineering Society (AES) Convention},
    year = {2017},
    address = {Berlin, Germany},
    url = {http://www.aes.org/e-lib/browse.cfm?elib=18641},
    }

  • F. Stevens, D. T. Murphy, L. Savioja, and V. Valimaki, “Modeling sparsely reflecting outdoor acoustic scenes using the waveguide web,” Ieee/acm transactions on audio, speech and language processing (taslp), vol. 25, iss. 8, p. 1566–1578, 2017. doi:10.1109/TASLP.2017.2699424
    [BibTeX]
    @Article{Stevens2017a,
    author = {F. Stevens and D. T. Murphy and L. Savioja and V. Valimaki},
    title = {Modeling sparsely reflecting outdoor acoustic scenes using the waveguide web},
    journal = {IEEE/ACM Transactions on Audio, Speech and Language Processing (TASLP)},
    year = {2017},
    volume = {25},
    number = {8},
    pages = {1566--1578},
    issn = {2329-9290},
    doi = {10.1109/TASLP.2017.2699424},
    }

  • F. Stevens, D. T. Murphy, and S. L. Smith, “Soundscape categorisation and the self-assessment manikin,” in Proceedings of the 20th international conference on digital audio effects (dafx-17), Edinburgh, UK, 2017.
    [BibTeX] [Download PDF]
    @InProceedings{Stevens2017b,
    author = {Stevens, F. and Murphy, D. T. and Smith, S. L.},
    title = {Soundscape categorisation and the self-assessment manikin},
    booktitle = {Proceedings of the 20th International Conference on Digital Audio Effects (DAFx-17)},
    year = {2017},
    address = {Edinburgh, UK},
    url = {http://www.dafx17.eca.ed.ac.uk/papers/DAFx17_paper_7.pdf},
    }

  • L. Thresh, C. Armstrong, and G. Kearney, “A direct comparison of localization performance when using first, third, and fifth ambisonics order for real loudspeaker and virtual loudspeaker rendering,” in Audio engineering society convention 143, 2017.
    [BibTeX] [Download PDF]
    @Conference{Thresh2017,
    author = {Thresh, Lewis and Armstrong, Cal and Kearney, Gavin},
    title = {A direct comparison of localization performance when using first, third, and fifth ambisonics order for real loudspeaker and virtual loudspeaker rendering},
    booktitle = {Audio Engineering Society Convention 143},
    year = {2017},
    month = oct,
    url = {http://www.aes.org/e-lib/browse.cfm?elib=19261},
    }

  • R. R. Vos, H. Daffern, and D. M. Howard, “Resonance tuning in three girl choristers,” Journal of voice, vol. 31, iss. 1, p. 122.e1 – 122.e7, 2017. doi:10.1016/j.jvoice.2016.01.013
    [BibTeX] [Download PDF]
    @article{Vos2017,
    title = "Resonance Tuning in Three Girl Choristers",
    journal = "Journal of Voice",
    volume = "31",
    number = "1",
    pages = "122.e1 - 122.e7",
    year = "2017",
    issn = "0892-1997",
    doi = "https://doi.org/10.1016/j.jvoice.2016.01.013",
    url = "http://www.sciencedirect.com/science/article/pii/S0892199716000291",
    author = "R. R. Vos and H. Daffern and D. M. Howard",
    }

  • R. Zolfaghari, N. Epain, C. Jin, J. Glaunés, and A. I. Tew, “Kernel principal component analysis of the ear morphology,” in Icassp 2017, new orleans, usa, 2017. doi:10.1109/ICASSP.2017.7952202
    [BibTeX] [Abstract]

    This paper describes features in the ear shape that change across a population of ears and explores the corresponding changes in ear acoustics. The statistical analysis conducted over the space of ear shapes uses a kernel principal component analysis (KPCA). Further, it utilizes the framework of large deformation diffeomorphic metric mapping and the vector space that is constructed over the space of initial momentums, which describes the diffeomorphic transformations from the reference template ear shape. The population of ear shapes examined by the KPCA are 124 left and right ear shapes from the SYMARE database that were rigidly aligned to the template (population average) ear. In the work presented here we show the morphological variations captured by the first two kernel principal components, and also show the acoustic transfer functions of the ears which are computed using fast multipole boundary element method simulations.

    @InProceedings{Zolfaghari2017,
    author = {R. Zolfaghari and N. Epain and C. Jin and J. Glaun{\'e}s and A. I. Tew},
    title = {Kernel principal component analysis of the ear morphology},
    booktitle = {ICASSP 2017, New Orleans, USA},
    year = {2017},
    month = mar,
    publisher = {IEEE},
    abstract = {This paper describes features in the ear shape that change across a population of ears and explores the corresponding changes in ear acoustics. The statistical analysis conducted over the space of ear shapes uses a kernel principal component analysis (KPCA). Further, it utilizes the framework of large deformation diffeomorphic metric mapping and the vector space that is constructed over the space of initial momentums, which describes the diffeomorphic transformations from the reference template ear shape. The population of ear shapes examined by the KPCA are 124 left and right ear shapes from the SYMARE database that were rigidly aligned to the template (population average) ear. In the work presented here we show the morphological variations captured by the first two kernel principal components, and also show the acoustic transfer functions of the ears which are computed using fast multipole boundary element method simulations.},
    day = {5},
    doi = {10.1109/ICASSP.2017.7952202},
    issn = {2379-190X},
    keywords = {Morphoacoustics, LDDMM, Kernel principal Component analysis, Ear shape analysis, FM-BEM},
    }

2016

  • C. Armstrong and J. Brereton, “The Application of Flexilink in Multi-User Virtual Acoustic Environments,” in Proceedings of the interactive audio systems symposium, 2016.
    [BibTeX] [Download PDF]
    @InProceedings{Armstrong2016,
    author = {Armstrong, Cal and Brereton, Jude},
    title = {{The Application of Flexilink in Multi-User Virtual Acoustic Environments}},
    booktitle = {Proceedings of the Interactive Audio Systems Symposium},
    year = {2016},
    url = {https://www.york.ac.uk/sadie-project//IASS2016/IASS{\_}Papers/IASS{\_}2016{\_}paper{\_}20.pdf},
    }

  • C. Armstrong and J. Brereton, “A Filter Based Approach to Sound Source Simulation Through an Outward Facing Spherical Array of Loudspeakers,” in Proceedings of the interactive audio systems symposium, 2016.
    [BibTeX] [Download PDF]
    @InProceedings{Armstrong2016a,
    author = {Armstrong, Cal and Brereton, Jude},
    title = {{A Filter Based Approach to Sound Source Simulation Through an Outward Facing Spherical Array of Loudspeakers}},
    booktitle = {Proceedings of the Interactive Audio Systems Symposium},
    year = {2016},
    url = {https://www.york.ac.uk/sadie-project//IASS2016/IASS{\_}Papers/IASS{\_}2016{\_}paper{\_}21.pdf},
    }

  • J. Brereton, “Making learning authentic: real-world assessments for masters level study,” Forum, iss. 40, p. 14–15, 2016.
    [BibTeX]
    @ARTICLE{Brereton2016-os,
    title = "Making learning authentic: real-world assessments for masters
    level study",
    author = "Brereton, J",
    journal = "Forum",
    number = 40,
    pages = "14--15",
    year = 2016
    }

  • M. C. Green, J. Szymanski, and M. Speed, “Assessing the suitability of the magnitude slope deviation detection criterion for use in automatic acoustic feedback control,” in Dafx 16, 2016, pp. 85-92.
    [BibTeX] [Download PDF]
    @inproceedings{Green2016,
    Author = {M. C. Green and J. Szymanski and M. Speed},
    Booktitle = {DAFx 16},
    Month = {September},
    Pages = {85 - 92},
    Title = {Assessing the Suitability of the Magnitude Slope Deviation Detection Criterion for use in Automatic Acoustic Feedback Control},
    url = {http://dafx16.vutbr.cz/dafxpapers/12-DAFx-16_paper_23-PN.pdf},
    Year = {2016}}

  • S. Hughes and G. Kearney, “Auditory immersion of 5.1 virtualization within gameplay,” Millennium biltmore hotel, los angeles, ca, p. 18, 2016.
    [BibTeX]
    @Article{Hughes2016,
    author = {S. Hughes and G. Kearney},
    title = {Auditory immersion of 5.1 virtualization within gameplay},
    journal = {Millennium Biltmore Hotel, Los Angeles, CA},
    year = {2016},
    pages = {18},
    }

  • S. Hughes and G. Kearney, “Moving virtual source perception in 2D space,” in Audio engineering society conference: 2016 aes international conference on audio for virtual and augmented reality, 2016.
    [BibTeX] [Download PDF]
    @Conference{Hughes2016a,
    author = {S. Hughes and G. Kearney},
    title = {Moving virtual source perception in 2{D} space},
    booktitle = {Audio Engineering Society Conference: 2016 AES International Conference on Audio for Virtual and Augmented Reality},
    year = {2016},
    month = sep,
    url = {http://www.aes.org/e-lib/browse.cfm?elib=18492},
    }

  • A. Hunt, Managing your project: Achieving success with minimal stress, CreateSpace Independent Publishing Platform, 2016.
    [BibTeX] [Download PDF]
    @Book{Hunt2016,
    title = {Managing your project: {A}chieving success with minimal stress},
    publisher = {CreateSpace Independent Publishing Platform},
    year = {2016},
    author = {Hunt, A.},
    isbn = {9781537212203},
    url = {https://books.google.co.uk/books?id=gvs1vgAACAAJ},
    }

  • G. Kearney, “Auditory height perception in cross-talk cancellation using low order HRTF approximation,” in Reproduced sound, 2016.
    [BibTeX]
    @InProceedings{Kearney2016,
    author = {Kearney, Gavin},
    title = {Auditory height perception in cross-talk cancellation using low order {HRTF} approximation},
    booktitle = {Reproduced Sound},
    year = {2016},
    organization = {Institute of Acoustics},
    }

  • G. Kearney, “The perception of auditory height in individualised and non-individualized dynamic cross-talk cancellation,” in Audio engineering society conference: 2016 aes international conference on sound field control, 2016.
    [BibTeX] [Download PDF]
    @Conference{Kearney2016a,
    author = {G. Kearney},
    title = {The perception of auditory height in individualised and non-individualized dynamic cross-talk cancellation},
    booktitle = {Audio Engineering Society Conference: 2016 AES International Conference on Sound Field Control},
    year = {2016},
    month = jul,
    url = {http://www.aes.org/e-lib/browse.cfm?elib=18323},
    }

  • G. Kearney, H. Daffern, L. Thresh, H. Omodudu, C. Armstrong, and J. Brereton, “Design of an interactive virtual reality system for ensemble singing,” in Interactive audio systems symposium, 2016.
    [BibTeX] [Download PDF]
    @InProceedings{Kearney2016b,
    author = {Kearney, Gavin and Daffern, Helena and Thresh, Lewis and Omodudu, Haroom and Armstrong, Calum and Brereton, Jude},
    title = {Design of an interactive virtual reality system for ensemble singing},
    booktitle = {Interactive Audio Systems Symposium},
    year = {2016},
    organization = {University of York},
    url = {https://www.york.ac.uk/sadie-project/IASS2016/IASS_Papers/IASS_2016_paper_1.pdf},
    }

  • M. Lopez, G. Kearney, and K. Hofstädter, “Enhancing audio description, spatialisation and accessibility in film and television,” in Proc. reproduced sound conference, 2016.
    [BibTeX]
    @InProceedings{Lopez2016,
    author = {M. Lopez and G. Kearney and K. Hofst{\"a}dter},
    title = {Enhancing audio description, spatialisation and accessibility in film and television},
    booktitle = {Proc. Reproduced Sound Conference},
    year = {2016},
    volume = {38},
    number = {Pt 2},
    }

  • M. Lovedee-Turner, J. Brereton, and D. Murphy, “An algorithmic approach to the manipulation of B-Format impulse responses for sound source rotation,” in Audio engineering society conference: 61st international conference: audio for games, 2016.
    [BibTeX]
    @INPROCEEDINGS{Lovedee-Turner2016-ut,
    title = "An Algorithmic Approach to the Manipulation of {B-Format}
    Impulse Responses for Sound Source Rotation",
    booktitle = "Audio Engineering Society Conference: 61st International
    Conference: Audio for Games",
    author = "Lovedee-Turner, Michael and Brereton, Jude and Murphy, Damian",
    year = 2016
    }

  • C. Pike, F. Melchior, and A. I. Tew, “Descriptive analysis of binaural rendering with virtual loudspeakers using a rate-all-that-apply approach,” in Proc. aes conference on headphone technology 2016, 2016.
    [BibTeX] [Abstract] [Download PDF]

    Spatial audio content for headphones is often created using binaural rendering of a virtual loudspeaker array. It is important to understand the effect of this choice on the sound quality. A sensory profiling evaluation was used to assess the perceived differences between direct binaural rendering and virtual loudspeaker rendering of a single sound source with and without head tracking and using anechoic and reverberant binaural impulse responses. A subset of the Spatial Audio Quality Inventory (SAQI) was used. Listeners first selected only attributes that they felt applied to the given stimuli. Initial analysis shows that tone colour and source direction are most affected by the use of this technique, but source extent, distance, and externalisation are also affected. Further work is required to analyse the sparse attribute rating data in depth.

    @InProceedings{Pike2016,
    author = {C. Pike and F. Melchior and A. I. Tew},
    title = {Descriptive analysis of binaural rendering with virtual loudspeakers using a rate-all-that-apply approach},
    booktitle = {Proc. AES Conference on Headphone Technology 2016},
    year = {2016},
    month = aug,
    abstract = {Spatial audio content for headphones is often created using binaural rendering of a virtual loudspeaker array. It is important to understand the effect of this choice on the sound quality. A sensory profiling evaluation was used to assess the perceived differences between direct binaural rendering and virtual loudspeaker rendering of a single sound source with and without head tracking and using anechoic and reverberant binaural impulse responses. A subset of the Spatial Audio Quality Inventory (SAQI) was used. Listeners first selected only attributes that they felt applied to the given stimuli. Initial analysis shows that tone colour and source direction are most affected by the use of this technique, but source extent, distance, and externalisation are also affected. Further work is required to analyse the sparse attribute rating data in depth.},
    day = {19},
    keywords = {binaural, virtual auditory space, perceptual quality},
    url = {http://www.aes.org/e-lib/browse.cfm?elib=18348},
    }

  • D. Robinson and G. Kearney, “Echolocation in virtual reality,” in Interactive audio systems symposium, 2016.
    [BibTeX] [Download PDF]
    @InProceedings{Robinson2016,
    author = {Robinson, Darren and Kearney, Gavin},
    title = {Echolocation in virtual reality},
    booktitle = {Interactive Audio Systems Symposium},
    year = {2016},
    organization = {University of York},
    url = {https://www.york.ac.uk/sadie-project/IASS2016/IASS_Papers/IASS_2016_paper_22.pdf},
    }

  • J. Sanderson and A. Hunt, “Using real-time sonification of heart rate data to provide a mobile based training aid for runners,” in Interactive audio systems symposium 2016, 2016.
    [BibTeX] [Download PDF]
    @InProceedings{Sanderson2016,
    author = {J. Sanderson and A. Hunt},
    title = {Using real-time sonification of heart rate data to provide a mobile based training aid for runners},
    booktitle = {Interactive Audio Systems Symposium 2016},
    year = {2016},
    url = {https://www.york.ac.uk/sadie-project/IASS2016/IASS_Papers/IASS_2016_paper_5.pdf},
    }

  • F. Stevens, D. T. Murphy, and S. L. Smith, “Emotion and soundscape preference rating: using semantic differential pairs and the self-assessment manikin,” in Sound and music computing conference, hamburg, 2016, Hamburg, Germany, 2016.
    [BibTeX] [Download PDF]
    @InProceedings{Stevens2016,
    author = {Stevens, F. and Murphy, D. T. and Smith, S. L.},
    title = {Emotion and soundscape preference rating: using semantic differential pairs and the self-assessment manikin},
    booktitle = {Sound and Music Computing conference, Hamburg, 2016},
    year = {2016},
    address = {Hamburg, Germany},
    url = {https://smc2016.hfmt-hamburg.de/wp-content/uploads/2016/09/SMC2016_proceedings.pdf},
    }

  • F. Stevens, D. T. Murphy, and S. L. Smith, “The Self-Assessment Manikin and heart rate: Responses to auralised soundscapes,” in Interactive audio systems symposium 2016, York, UK, 2016.
    [BibTeX] [Download PDF]
    @InProceedings{Stevens2016a,
    author = {F. Stevens and D. T. Murphy and S. L. Smith},
    title = {The {S}elf-{A}ssessment {M}anikin and heart rate: {R}esponses to auralised soundscapes},
    booktitle = {Interactive Audio Systems Symposium 2016},
    year = {2016},
    address = {York, UK},
    url = {https://www.york.ac.uk/sadie-project/IASS2016/IASS_Papers/IASS_2016_paper_3.pdf},
    }

  • K. Young, G. Kearney, and A. I. Tew, “Boundary element modelling of KEMAR for binaural rendering: Mesh production and validation.” 2016.
    [BibTeX] [Abstract] [Download PDF]

    Head and torso simulators are used extensively within acoustic research, often in place of human subjects in time-consuming or repetitive experiments. Particularly common is the Knowles Electronics Manikin for Acoustic Research (KEMAR), which has the acoustic auditory properties of an average human head. As an alternative to physical acoustic measurements, the boundary element method (BEM) is widely used to calculate the propagation of sound using computational models of a scenario. Combining this technique with a compatible 3D surface mesh of KEMAR would allow for detailed binaural analysis of speaker distributions and decoder design – without the disadvantages associated with making physical measurements. This paper details the development and validation of a BEM-compatible mesh model of KEMAR, based on the original computer-aided design (CAD) file and valid up to 20 kHz. Use of the CAD file potentially allows a very close match to be achieved between the mesh and the physical manikin. The mesh is consistent with the original CAD description, both in terms of overall volume and of local topology, and the numerical requirements for BEM compatibility have been met. Computational limitations restrict usage of the mesh in its current state, so simulation accuracy cannot as yet be compared with acoustically measured HRTFs. Future work will address the production of meshes suitable for use in BEM with lower computational requirements, using the process validated in this work.

    @Conference{Young2016,
    author = {K. Young and G. Kearney and A. I. Tew},
    title = {Boundary element modelling of {KEMAR} for binaural rendering: {M}esh production and validation},
    year = {2016},
    month = sep,
    note = {Interactive Audio Systems Symposium, IASS ; Conference date: 23-09-2016 Through 23-09-2016},
    abstract = {Head and torso simulators are used extensively within acoustic research, often in place of human subjects in time-consuming or repetitive experiments. Particularly common is the Knowles Electronics Manikin for Acoustic Research (KEMAR), which has the acoustic auditory properties of an average human head. As an alternative to physical acoustic measurements, the boundary element method (BEM) is widely used to calculate the propagation of sound using computational models of a scenario. Combining this technique with a compatible 3D surface mesh of KEMAR would allow for detailed binaural analysis of speaker distributions and decoder design - without the disadvantages associated with making physical measurements.
    This paper details the development and validation of a BEM-compatible mesh model of KEMAR, based on the original computer-aided design (CAD) file and valid up to 20 kHz. Use of the CAD file potentially allows a very close match to be achieved between the mesh and the physical manikin. The mesh is consistent with the original CAD description, both in terms of overall volume and of local topology, and the numerical requirements for BEM compatibility have been met. Computational limitations restrict usage of the mesh in its current state, so simulation accuracy cannot as yet be compared with acoustically measured HRTFs. Future work will address the production of meshes suitable for use in BEM with lower computational requirements, using the process validated in this work.},
    day = {23},
    url = {https://www.york.ac.uk/sadie-project/IASS2016.html},
    }

  • R. Zolfaghari, N. Epain, C. Jin, J. Glaunés, and A. I. Tew, “Generating a morphable model of ears,” in Icassp 2016, shanghai, china, 2016. doi:10.1109/ICASSP.2016.7471981
    [BibTeX] [Abstract]

    This paper describes the generation of a morphable model for external ear shapes. The aim for the morphable model is to characterize an ear shape using only a few parameters in order to assist the study of morphoacoustics. The model is derived from a statistical analysis of a population of 58 ears from the SYMARE database. It is based upon the framework of large deformation diffeomorphic metric mapping (LDDMM) and the vector space that is constructed over the space of initial momentums describing the diffeomorphic transformations. To develop a morphable model using the LDDMM framework, the initial momentums are analyzed using a kernel based principal component analysis. In this paper, we examine the ability of our morphable model to construct test ear shapes not included in the principal component analysis.

    @InProceedings{Zolfaghari2016,
    author = {R. Zolfaghari and N. Epain and C. Jin and J. Glaun{\'e}s and A. I. Tew},
    title = {Generating a morphable model of ears},
    booktitle = {ICASSP 2016, Shanghai, China},
    year = {2016},
    month = mar,
    publisher = {IEEE},
    note = {2016 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP); Conference date: 20-03-2016 Through 25-03-2016},
    abstract = {This paper describes the generation of a morphable model for external ear shapes. The aim for the morphable model is to characterize an ear shape using only a few parameters in order to assist the study of morphoacoustics. The model is derived from a statistical analysis of a population of 58 ears from the SYMARE database. It is based upon the framework of large deformation diffeomorphic metric mapping (LDDMM) and the vector space that is constructed over the space of initial momentums describing the diffeomorphic transformations. To develop a morphable model using the LDDMM framework, the initial momentums are analyzed using a kernel based principal component analysis. In this paper, we examine the ability of our morphable model to construct test ear shapes not included in the principal component analysis.},
    day = {16},
    doi = {10.1109/ICASSP.2016.7471981},
    issn = {2379-190X},
    }

2015

  • N. Degara, A. Hunt, and T. Hermann, “Interactive Sonification [Guest editors’ introduction],” Ieee multimedia, vol. 22, iss. 1, p. 20–23, 2015.
    [BibTeX]
    @Article{Degara2015,
    author = {Degara, Norberto and Hunt, Andy and Hermann, Thomas},
    title = {Interactive {S}onification [{G}uest editors' introduction]},
    journal = {IEEE MultiMedia},
    year = {2015},
    volume = {22},
    number = {1},
    pages = {20--23},
    publisher = {IEEE},
    }

  • J. Gao and A. I. Tew, “The segregation of spatialised speech in interference by optimal mapping of diverse cues,” in 2015 ieee international conference on acoustics, speech, and signal processing, 2015, p. 2095–2099. doi:10.1109/ICASSP.2015.7178340
    [BibTeX] [Abstract]

    We describe optimal cue mapping (OCM), a potentially real-time binaural signal processing method for segregating a sound source in the presence of multiple interfering 3D sound sources. Spatial cues are extracted from a multisource binaural mixture and used to train artificial neural networks (ANNs) to estimate the spectral energy fraction of a wanted speech source in the mixture. Once trained, the ANN outputs form a spectral ratio mask which is applied frame-by-frame to the mixture to approximate the magnitude spectrum of the wanted speech. The speech intelligibility performance of the OCM algorithm for anechoic sound sources is evaluated on previously unseen speech mixtures using the STOI automated measures, and compared with an established reference method. The optimized integration of multiple cues offers clear performance benefits and the ability to quantify the relative importance of each cue will facilitate computationally efficient implementations.

    @InProceedings{Gao2015,
    author = {J. Gao and A. I. Tew},
    title = {The segregation of spatialised speech in interference by optimal mapping of diverse cues},
    booktitle = {2015 IEEE International Conference on Acoustics, Speech, and Signal Processing},
    year = {2015},
    pages = {2095--2099},
    month = apr,
    publisher = {IEEE},
    abstract = {We describe optimal cue mapping (OCM), a potentially real-time binaural signal processing method for segregating a sound source in the presence of multiple interfering 3D sound sources. Spatial cues are extracted from a multisource binaural mixture and used to train artificial neural networks (ANNs) to estimate the spectral energy fraction of a wanted speech source in the mixture. Once trained, the ANN outputs form a spectral ratio mask which is applied frame-by-frame to the mixture to approximate the magnitude spectrum of the wanted speech. The speech intelligibility performance of the OCM algorithm for anechoic sound sources is evaluated on previously unseen speech mixtures using the STOI automated measures, and compared with an established reference method. The optimized integration of multiple cues offers clear performance benefits and the ability to quantify the relative importance of each cue will facilitate computationally efficient implementations.},
    day = {19},
    doi = {10.1109/ICASSP.2015.7178340},
    isbn = {978-1-4673-6997-8},
    keywords = {Speech segregation, Artificial Neural Networks, ratio mask},
    }

  • A. F. Hinde, M. J. Evans, A. I. Tew, and D. M. Howard, “Onset asynchrony in spoken menus,” in International conference on auditory display (icad) 2015, 2015, p. 86–93.
    [BibTeX] [Abstract] [Download PDF]

    The menu is an important interface component, which appears unlikely to be completely superseded by modern search-based approaches. For someone who is unable to attend a screen visually, however, alternative non-visual menu formats are often problematic. A display is developed in which multiple concurrent words are presented with different amounts of onset asynchrony. The effect of different amounts of asynchrony and word length on task durations, accuracy and workload are explored. It is found that total task duration is significantly affected by both onset asynchrony and word duration. Error rates are significantly affected by both onset asynchrony, word length and their interaction, whilst subjective workload scores are only significantly affected by onset asynchrony. Overall, the results appear to suggest that the best compromise between accuracy, workload and speed may be achieved through presenting shorter or temporally-compressed words with a short inter-stimuli interval.

    @InProceedings{Hinde2015,
    author = {A. F. Hinde and M. J. Evans and A. I. Tew and D. M. Howard},
    title = {Onset asynchrony in spoken menus},
    booktitle = {International Conference on Auditory Display (ICAD) 2015},
    year = {2015},
    pages = {86--93},
    month = jul,
    note = {This work is licensed under Creative Commons Attribution Non Commercial 4.0 International License.},
    abstract = {The menu is an important interface component, which appears unlikely to be completely superseded by modern search-based approaches. For someone who is unable to attend a screen visually, however, alternative non-visual menu formats are often problematic. A display is developed in which multiple concurrent words are presented with different amounts of onset asynchrony. The effect of different amounts of asynchrony and word length on task durations, accuracy and workload are explored. It is found that total task duration is significantly affected by both onset asynchrony and word duration. Error rates are significantly affected by both onset asynchrony, word length and their interaction, whilst subjective workload scores are only significantly affected by onset asynchrony. Overall, the results appear to suggest that the best compromise between accuracy, workload and speed may be achieved through presenting shorter or temporally-compressed words with a short inter-stimuli interval.},
    day = {8},
    isbn = {978-3-902949-01-1},
    keywords = {ONSET ASYNCHRONY, Menu navigation, Speech},
    url = {http://hdl.handle.net/1853/54112},
    }

  • L. Hobden and A. I. Tew, “Investigating head-related transfer function spectral smoothing using a sagittal plane auditory localization model,” in Proc 2015 ieee workshop on applications of signal processing to audio and acoustics (waspaa), new paltz, ny, 2015. doi:10.1109/WASPAA.2015.7336955
    [BibTeX] [Abstract]

    A new head-related transfer function (HRTF) smoothing algorithm is presented. HRTF magnitude responses are expressed on an equivalent rectangular bandwidth frequency scale and smoothing is increased by progressively discarding the higher frequency Fourier coefficients. A sagittal plane localization model was used to assess the degree of spectral smoothing that can be applied without significant increase in localization error. The results of the localization model simulation were compared with results from a previous perceptual investigation using an algorithm that discards coefficients on a linear frequency scale. Our findings suggest that using a perceptually motivated frequency scale yields similar localization performance using fewer than half the number of coefficients.

    @InProceedings{Hobden2015,
    author = {L. Hobden and A. I. Tew},
    title = {Investigating head-related transfer function spectral smoothing using a sagittal plane auditory localization model},
    booktitle = {Proc 2015 IEEE Workshop on Applications of Signal Processing to Audio and Acoustics (WASPAA), New Paltz, NY},
    year = {2015},
    abstract = {A new head-related transfer function (HRTF) smoothing algorithm is presented. HRTF magnitude responses are expressed on an equivalent rectangular bandwidth frequency scale and smoothing is increased by progressively discarding the higher frequency Fourier coefficients. A sagittal plane localization model was used to assess the degree of spectral smoothing that can be applied without significant increase in localization error. The results of the localization model simulation were compared with results from a previous perceptual investigation using an algorithm that discards coefficients on a linear frequency scale. Our findings suggest that using a perceptually motivated frequency scale yields similar localization performance using fewer than half the number of coefficients.},
    doi = {10.1109/WASPAA.2015.7336955},
    keywords = {head-related transfer function, spatial sound, spectral smoothing, auditory localization model},
    }

  • S. Hughes and G. Kearney, “Fear and localisation: Emotional fine-tuning utilising multiple source directions,” in Audio engineering society conference: 56th international conference: audio for games, 2015.
    [BibTeX] [Download PDF]
    @Conference{Hughes2015,
    author = {S. Hughes and G. Kearney},
    title = {Fear and localisation: {E}motional fine-tuning utilising multiple source directions},
    booktitle = {Audio Engineering Society Conference: 56th International Conference: Audio for Games},
    year = {2015},
    month = feb,
    url = {http://www.aes.org/e-lib/browse.cfm?elib=17596},
    }

  • G. Kearney and T. Doyle, “A virtual loudspeaker database for ambisonics research,” in Icsa 2015, 2015.
    [BibTeX]
    @InProceedings{Kearney2015,
    author = {G. Kearney and T. Doyle},
    title = {A virtual loudspeaker database for ambisonics research},
    booktitle = {ICSA 2015},
    year = {2015},
    month = sep,
    publisher = {Verband Deutscher Tonmeister e.V.},
    day = {8},
    }

  • G. Kearney and T. Doyle, “On prediction of auditory height in ambisonics,” in Tagungsbericht icsa 2015, 2015.
    [BibTeX]
    @InProceedings{Kearney2015a,
    author = {G. Kearney and T. Doyle},
    title = {On prediction of auditory height in ambisonics},
    booktitle = {Tagungsbericht ICSA 2015},
    year = {2015},
    month = sep,
    publisher = {Verband Deutscher Tonmeister e.V.},
    day = {8},
    }

  • G. Kearney and T. Doyle, “An HRTF database for virtual loudspeaker rendering,” in Audio engineering society convention 139, 2015.
    [BibTeX] [Download PDF]
    @Conference{Kearney2015b,
    author = {Kearney, Gavin and Doyle, Tony},
    title = {An {HRTF} database for virtual loudspeaker rendering},
    booktitle = {Audio Engineering Society Convention 139},
    year = {2015},
    month = oct,
    url = {http://www.aes.org/e-lib/browse.cfm?elib=17980},
    }

  • G. Kearney, X. Liu, A. Manns, and M. Gorzel, “Auditory distance perception with static and dynamic binaural rendering,” in Audio engineering society conference: 57th international conference: the future of audio entertainment technology & cinema, television and the internet, 2015.
    [BibTeX] [Download PDF]
    @Conference{Kearney2015c,
    author = {G. Kearney and X. Liu and A. Manns and M. Gorzel},
    title = {Auditory distance perception with static and dynamic binaural rendering},
    booktitle = {Audio Engineering Society Conference: 57th International Conference: The Future of Audio Entertainment Technology \& Cinema, Television and the Internet},
    year = {2015},
    month = mar,
    url = {http://www.aes.org/e-lib/browse.cfm?elib=17603},
    }

  • G. Kearney and T. Doyle, “Height perception in ambisonic based binaural decoding,” in Audio engineering society convention 139, 2015.
    [BibTeX] [Download PDF]
    @Conference{Kearney2015d,
    author = {G. Kearney and T. Doyle},
    title = {Height perception in ambisonic based binaural decoding},
    booktitle = {Audio Engineering Society Convention 139},
    year = {2015},
    month = oct,
    url = {http://www.aes.org/e-lib/browse.cfm?elib=17979},
    }

  • R. McIlraith, P. Walton, and J. Brereton, “The spatialised sonification of Drug-Enzyme interactions,” in The 21st international conference on auditory display (ICAD 2015), Graz, Austria, 2015, p. 323–324.
    [BibTeX]
    @INPROCEEDINGS{McIlraith2015-uz,
    title = "The Spatialised Sonification of {Drug-Enzyme} Interactions",
    booktitle = "The 21st International Conference on Auditory Display ({ICAD}
    2015)",
    author = "McIlraith, Rick and Walton, Paul and Brereton, Jude",
    pages = "323--324",
    institution = "Georgia Institute of Technology",
    year = 2015,
    address = "Graz, Austria",
    conference = "ICAD 2015"
    }

  • J. W. Newbold, A. Hunt, and J. Brereton, “Chemical spectral analysis through sonification,” in The 21st international conference on auditory display, 2015.
    [BibTeX]
    @INPROCEEDINGS{Newbold2015-gh,
    title = "Chemical spectral analysis through sonification",
    booktitle = "The 21st International Conference on Auditory Display",
    author = "Newbold, Joseph W and Hunt, Andy and Brereton, Jude",
    year = 2015,
    conference = " ICAD--2015",
    location = " Graz, Austria"
    }

  • D. Satongar, C. Pike, Y. Lam, and A. I. Tew, “The influence of headphones on the localisation of external loudspeaker sources,” Journal of the audio engineering society, vol. 63, iss. 10, p. 799–810, 2015. doi:10.17743/jaes.2015.0072
    [BibTeX] [Abstract] [Download PDF]

    When validating systems that use headphones to synthesise virtual sound sources, a direct comparison between virtual and real sources is sometimes needed. This paper considers the passive influence of headphones on the sound transmission and perception of external loudspeaker sources, for which physical measurements and behavioral data have been obtained. Physical measurements of the effect of a number of headphone models are given and analysed using an auditory filter bank and binaural cue extraction. These highlighted that all of the headphones had an effect on localisation cues and repositioning had a measurable effect. A localisation test was undertaken using one of the best performing headphones from the measurements. It was found that the presence of the headphones caused a small increase in localisation error and that the process of judging source location was different, highlighting a possible increase in the complexity of the localisation task.

    @Article{Satongar2015,
    author = {D. Satongar and C. Pike and Y. Lam and A. I. Tew},
    title = {The influence of headphones on the localisation of external loudspeaker sources},
    journal = {Journal of the Audio Engineering Society},
    year = {2015},
    volume = {63},
    number = {10},
    pages = {799--810},
    issn = {0004-7554},
    abstract = {When validating systems that use headphones to synthesise virtual sound sources, a direct comparison between virtual and real sources is sometimes needed. This paper considers the passive influence of headphones on the sound transmission and perception of external loudspeaker sources, for which physical measurements and behavioral data have been obtained. Physical measurements of the effect of a number of headphone models are given and analysed using an auditory filter bank and binaural cue extraction. These highlighted that all of the headphones had an effect on localisation cues and repositioning had a measurable effect. A localisation test was undertaken using one of the best performing headphones from the measurements. It was found that the presence of the headphones caused a small increase in localisation error and that the process of judging source location was different, highlighting a possible increase in the complexity of the localisation task.},
    doi = {10.17743/jaes.2015.0072},
    url = {http://www.aes.org/e-lib/browse.cfm?elib=18043},
    }

  • F. Stevens and D. T. Murphy, “Acoustic source localisation in an urban environment using early reflection information,” in Euronoise2015, Maastricht, The Netherlands, 2015.
    [BibTeX] [Download PDF]
    @InProceedings{Stevens2015,
    author = {F. Stevens and D. T. Murphy},
    title = {Acoustic source localisation in an urban environment using early reflection information},
    booktitle = {Euronoise2015},
    year = {2015},
    address = {Maastricht, The Netherlands},
    url = {https://www.conforg.fr/euronoise2015/proceedings/data/articles/000020.pdf},
    }

  • R. R. Vos, H. Daffern, and D. M. Howard, “A pilot study: investigating formant tuning in girl choristers through wide-band excitation.” 2015.
    [BibTeX]
    @conference{Vos2015,
    title = "A pilot study: Investigating formant tuning in girl choristers through wide-band excitation",
    author = "R. R. Vos and H. Daffern and D. M. Howard",
    year = "2015",
    language = "English",
    note = "The Voice Foundation Symposium ; Conference date: 26-05-2015 Through 31-05-2015",
    }

  • J. Yang and A. Hunt, “Real-time sonification of biceps curl exercise using muscular activity and kinematics.” 2015.
    [BibTeX]
    @InProceedings{Yang2015,
    author = {Yang, Jiajun and Hunt, Andy},
    title = {Real-time sonification of biceps curl exercise using muscular activity and kinematics},
    year = {2015},
    organization = {Georgia Institute of Technology},
    }

2014

  • D. Corrigan, F. Pitié, M. Gorzel, G. Kearney, V. Morris, A. Rankin, M. Linnane, M. O’Dea, C. Lee, and A. Kokaram, “A video database for the development of stereo-3D post-production algorithms,” Journal of virtual reality and broadcasting, vol. 10(2013), iss. 3, 2014.
    [BibTeX] [Abstract] [Download PDF]

    This paper introduces a database of freely available stereo-3D content designed to facilitate research in stereo post-production. It describes the structure and content of the database and provides some details about how the material was gathered. The database includes examples of many of the scenarios characteristic to broadcast footage. Material was gathered at different locations including a studio with controlled lighting and both indoor and outdoor on-location sites with more restricted lighting control. The database also includes video sequences with accompanying 3D audio data recorded in an Ambisonics format. An intended consequence of gathering the material is that the database contains examples of degradations that would be commonly present in real-world scenarios. This paper describes one such artefact caused by uneven exposure in the stereo views, causing saturation in the over-exposed view. An algorithm for the restoration of this artefact is proposed in order to highlight the usefulness of the database.

    @Article{Corrigan2014,
    author = {D. Corrigan and F. Piti{\'e} and M. Gorzel and G. Kearney and V. Morris and A. Rankin and M. Linnane and M. O'Dea and C. Lee and A. Kokaram},
    title = {A video database for the development of stereo-3{D} post-production algorithms},
    journal = {Journal of Virtual Reality and Broadcasting},
    year = {2014},
    volume = {10(2013)},
    number = {3},
    issn = {1860-2037},
    abstract = {This paper introduces a database of freely available stereo-3D content designed to facilitate research in stereo post-production. It describes the structure and content of the database and provides some details about how the material was gathered. The database includes examples of many of the scenarios characteristic to broadcast footage. Material was gathered at different locations including a studio with controlled lighting and both indoor and outdoor on-location sites with more restricted lighting control. The database also includes video sequences with accompanying 3D audio data recorded in an Ambisonics format. An intended consequence of gathering the material is that the database contains examples of degradations that would be commonly present in real-world scenarios. This paper describes one such artefact caused by uneven exposure in the stereo views, causing saturation in the over-exposed view. An algorithm for the restoration of this artefact is proposed in order to highlight the usefulness of the database.},
    keywords = {post production; stereo 3D; video-database},
    url = {http://nbn-resolving.de/urn:nbn:de:0009-6-37805},
    }

  • M. Gorzel, G. Kearney, and F. Boland, “Investigation of ambisonic rendering of elevated sound sources,” in Audio engineering society conference: 55th international conference: spatial audio, 2014.
    [BibTeX] [Download PDF]
    @Conference{Gorzel2014,
    author = {M. Gorzel and G. Kearney and F. Boland},
    title = {Investigation of ambisonic rendering of elevated sound sources},
    booktitle = {Audio Engineering Society Conference: 55th International Conference: Spatial Audio},
    year = {2014},
    month = aug,
    url = {http://www.aes.org/e-lib/browse.cfm?elib=17385},
    }

  • C. T. Jin, P. Guillon, N. Epain, R. Zolfaghari, A. van Schaik, A. I. Tew, C. Hetherington, and J. Thorpe, “Creating the Sydney York morphological and acoustic recordings of ears database,” Ieee transactions on multimedia, vol. 16, iss. 1, p. 37–46, 2014. doi:10.1109/TMM.2013.2282134
    [BibTeX]
    @Article{Jin2014,
    author = {C. T. Jin and P. Guillon and N. Epain and R. Zolfaghari and A. {van Schaik} and A. I. Tew and C. Hetherington and J. Thorpe},
    title = {Creating the {S}ydney {Y}ork morphological and acoustic recordings of ears database},
    journal = {IEEE Transactions on Multimedia},
    year = {2014},
    volume = {16},
    number = {1},
    pages = {37--46},
    month = jan,
    issn = {1520-9210},
    doi = {10.1109/TMM.2013.2282134},
    keywords = {Head-related transfer function, 3D audio, morphological data, fast multipole boundary element method, 3D mesh models, virtual auditory space},
    publisher = {Institute of Electrical and Electronics Engineers Inc.},
    }

  • T. Neate, N. Degara, A. Hunt, and F. Nagel, “A generic evaluation model for auditory feedback in complex visual searches.” 2014.
    [BibTeX]
    @InProceedings{Neate2014,
    author = {T. Neate and N. Degara and A. Hunt and F. Nagel},
    title = {A generic evaluation model for auditory feedback in complex visual searches},
    year = {2014},
    organization = {Georgia Institute of Technology},
    }

  • B. C. O’Toole, M. Gorzel, I. J. Kelly, L. O’Sullivan, G. Kearney, and F. Boland, “Virtual 5.1 surround sound localization using head-tracking devices,” in 25th iet irish signals & systems conference 2014 and 2014 china-ireland international conference on information and communications technologies (issc 2014/ciict 2014), 2014, p. 41–46. doi:10.1049/cp.2014.0656
    [BibTeX]
    @InProceedings{OToole2014,
    author = {B. C. O'Toole and M. Gorzel and I. J. Kelly and L. O'Sullivan and G. Kearney and F. Boland},
    title = {Virtual 5.1 surround sound localization using head-tracking devices},
    booktitle = {25th IET Irish Signals \& Systems Conference 2014 and 2014 China-Ireland International Conference on Information and Communications Technologies (ISSC 2014/CIICT 2014)},
    year = {2014},
    pages = {41--46},
    month = jun,
    doi = {10.1049/cp.2014.0656},
    keywords = {audio signal processing;loudspeakers;inertial measurement unit;Oculus Rift;Microsoft Kinect face-tracking;exploratory head movements;head-tracking devices;virtual source localization accuracy;sound localization accuracy;virtual 5.1 surround sound localization;virtual 5.1 loudspeaker arrays;surround sound;binaural;head tracking;spatial audio},
    }

  • C. Pike, F. Melchior, and A. I. Tew, “Assessing the plausibility of non-individualised dynamic binaural synthesis in a small room,” in Audio engineering society, 55th international conference, 2014.
    [BibTeX] [Abstract] [Download PDF]

    This paper presents a subjective assessment of the plausibility of a non-individualised dynamic binaural sound system, created using a dataset of binaural room impulse responses measured with a dummy head microphone. A signal detection theory analysis was carried out on the results, to assess the sensory difference between the simulation and reality. The design and objective validation of the system is also presented. A small but meaningful sensory difference was observed between real and binaurally simulated loudspeaker sounds.

    @InProceedings{Pike2014,
    author = {C. Pike and F. Melchior and A. I. Tew},
    title = {Assessing the plausibility of non-individualised dynamic binaural synthesis in a small room},
    booktitle = {Audio Engineering Society, 55th International Conference},
    year = {2014},
    abstract = {This paper presents a subjective assessment of the plausibility of a non-individualised dynamic binaural sound system, created using a dataset of binaural room impulse responses measured with a dummy head microphone. A signal detection theory analysis was carried out on the results, to assess the sensory difference between the simulation and reality. The design and objective validation of the system is also presented. A small but meaningful sensory difference was observed between real and binaurally simulated loudspeaker sounds.},
    day = {26},
    isbn = {9781634397599},
    url = {http://www.aes.org/e-lib/browse.cfm?elib=17353},
    }

  • F. Stevens and D. T. Murphy, “Spatial impulse response measurement in an urban environment,” in Audio engineering society conference: 55th international conference: spatial audio, Helsinki, Finland, 2014.
    [BibTeX] [Download PDF]
    @InProceedings{Stevens2014,
    author = {Stevens, F. and Murphy, D. T.},
    title = {Spatial impulse response measurement in an urban environment},
    booktitle = {Audio Engineering Society Conference: 55th International Conference: Spatial Audio},
    year = {2014},
    address = {Helsinki, Finland},
    month = aug,
    url = {http://www.aes.org/e-lib/browse.cfm?elib=17355},
    }

  • R. Zolfaghari, N. Epain, C. Jin, A. I. Tew, and J. Glaunès, “A multiscale LDDMM template algorithm for studying ear shape variations.” 2014. doi:10.1109/ICSPCS.2014.7021100
    [BibTeX] [Abstract]

    This paper describes a method to establish an average human ear shape across a population of ears by sequentially applying the Large Deformation Diffeomorphic Metric Mapping (LDDMM) framework at successively smaller physical scales. Determining such a population average ear shape, also referred to here as a template ear, is an essential step in studying the statistics of ear shapes because it allows the variations in ears to be studied relative to a common template shape. Our interest in the statistics of ear shapes stems from our desire to understand the relationship between ear morphology and the head-related impulse response (HRIR) filters that are essential for rendering 3D audio over headphones. The shape of the ear varies among listeners and is as individualized as a fingerprint. Because the acoustic filtering properties of the ears depend on their shape, the HRIR filters required for rendering 3D audio are also individualized. The contribution of this work is the demonstration of a sequential multiscale approach to creating a population template ear shape using the LDDMM framework. In particular we apply our sequential multiscale algorithm to a small population of synthetic ears in order to analyse its performance given a known reference ear shape.

    @Conference{Zolfaghari2014,
    author = {R. Zolfaghari and N. Epain and C. Jin and A. I. Tew and J. Glaun{\`e}s},
    title = {A multiscale {LDDMM} template algorithm for studying ear shape variations},
    year = {2014},
    month = dec,
    note = {INSPEC accession number 14881796; IEEE 8th International Conference on Signal Processing and Communication Systems (ICSPCS), 2014, QLD ; Conference date: 15-12-2014 Through 17-12-2014},
    abstract = {This paper describes a method to establish an average human ear shape across a population of ears by sequentially applying the Large Deformation Diffeomorphic Metric Mapping (LDDMM) framework at successively smaller physical scales. Determining such a population average ear shape, also referred to here as a template ear, is an essential step in studying the statistics of ear shapes because it allows the variations in ears to be studied relative to a common template shape. Our interest in the statistics of ear shapes stems from our desire to understand the relationship between ear morphology and the head-related impulse response (HRIR) filters that are essential for rendering 3D audio over headphones. The shape of the ear varies among listeners and is as individualized as a fingerprint. Because the acoustic filtering properties of the ears depend on their shape, the HRIR filters required for rendering 3D audio are also individualized. The contribution of this work is the demonstration of a sequential multiscale approach to creating a population template ear shape using the LDDMM framework. In particular we apply our sequential multiscale algorithm to a small population of synthetic ears in order to analyse its performance given a known reference ear shape.},
    doi = {10.1109/ICSPCS.2014.7021100},
    isbn = {978-1-4799-5255-7},
    keywords = {ear shape, morphology, head-related transfer function, SYMARE},
    }

2013

  • H. Daffern and J. Brereton, “Testing a new protocol to measure tuning response behaviour in solo voice ensemble singing.” 2013.
    [BibTeX]
    @INPROCEEDINGS{Daffern2013,
    title = "Testing a new protocol to measure tuning response
    behaviour in solo voice ensemble singing",
    author = "Daffern, Helena and Brereton, Jude",
    year = 2013,
    conference = "Sound and Music Computing Conference 2013",
    location = "Stockholm, Sweden"
    }

  • H. Daffern and D. M. Howard, “Analysing changing vibrato behaviour in solo voice ensemble singing,” in Pan european voice conference, 2013.
    [BibTeX] [Abstract]

    Perceptually vibrato is known to be an important characteristic in choral singing, with conductors specifically directing singers in their use of vibrato when considering ‘choral blend’, tuning, expression and taste. Particularly in Renaissance and Baroque repertoire vibrato is conventionally used as an ornament, and current convention in vocal ensemble performance is to restrict the use of vibrato for musical affect at specific moments in the music, such as at the resolution of a suspension. Whilst vibrato in solo singing is well-researched, consideration of vibrato behaviour of individuals in an ensemble setting is still in its infancy. This is in part due to the difficulty of isolating the individual voices for analysis. In order to construct a method for analysing vocal characteristics of individual singers within a group, a protocol was designed and pilot tested. Laryngograph and audio recordings were made of student vocal quartets from the University of York. The laryngograph data for each singer was analysed to extract fundamental frequency data. The fundamental frequency data was then used to analyse vibrato characteristics of the individual singers, considering in particular adapting vibrato for ‘choral blend’ and the use of vibrato as a musical ornament in suspensions. The perceptual relevance of the vibrato behaviour was assessed through a listening test. The results of vibrato characteristics of individual singers are considered alongside their perceptual relevance. The accuracy of the data is also considered alongside the implications of applying this protocol to analyse other vocal characteristics in choral singers.

    @inproceedings{Daffern2013b,
    title = "Analysing changing vibrato behaviour in solo voice ensemble singing",
    abstract = "Perceptually vibrato is known to be an important characteristic in choral singing, with conductors specifically directing singers in their use of vibrato when considering ‘choral blend’, tuning, expression and taste. Particularly in Renaissance and Baroque repertoire vibrato is conventionally used as an ornament, and current convention in vocal ensemble performance is to restrict the use of vibrato for musical affect at specific moments in the music, such as at the resolution of a suspension. Whilst vibrato in solo singing is well-researched, consideration of vibrato behaviour of individuals in an ensemble setting is still in its infancy. This is in part due to the difficulty of isolating the individual voices for analysis. In order to construct a method for analysing vocal characteristics of individual singers within a group, a protocol was designed and pilot tested. Laryngograph and audio recordings were made of student vocal quartets from the University of York. The laryngograph data for each singer was analysed to extract fundamental frequency data. The fundamental frequency data was then used to analyse vibrato characteristics of the individual singers, considering in particular adapting vibrato for ‘choral blend’ and the use of vibrato as a musical ornament in suspensions. The perceptual relevance of the vibrato behaviour was assessed through a listening test. The results of vibrato characteristics of individual singers are considered alongside their perceptual relevance. The accuracy of the data is also considered alongside the implications of applying this protocol to analyse other vocal characteristics in choral singers.",
    author = "H. Daffern and D. M. Howard",
    year = "2013",
    language = "English",
    booktitle = "Pan European Voice Conference",
    }

  • D. M. Howard, H. Daffern, and J. Brereton, “Four-part choral synthesis system for investigating intonation in a cappella choral singing,” Logoped. phoniatr. vocol., vol. 38, iss. 3, p. 135–142, 2013.
    [BibTeX]
    @ARTICLE{Howard2013-if,
    title = "Four-part choral synthesis system for investigating intonation in
    a cappella choral singing",
    author = "Howard, David M and Daffern, Helena and Brereton, Jude",
    journal = "Logoped. Phoniatr. Vocol.",
    volume = 38,
    number = 3,
    pages = "135--142",
    month = oct,
    year = 2013,
    language = "en"
    }

  • G. Kearney, “Sound field rendering for distributed audiences,” in Audio engineering society conference: 52nd international conference: sound field control – engineering and perception, 2013.
    [BibTeX] [Download PDF]
    @Conference{Kearney2013,
    author = {G. Kearney},
    title = {Sound field rendering for distributed audiences},
    booktitle = {Audio Engineering Society Conference: 52nd International Conference: Sound Field Control - Engineering and Perception},
    year = {2013},
    month = sep,
    url = {http://www.aes.org/e-lib/browse.cfm?elib=16902},
    }

  • M. Lopez, S. Pauletto, and G. Kearney, “The application of impulse response measurement techniques to the study of the acoustics of Stonegate, a performance space used in medieval English drama,” Acta acustica united with acustica, vol. 99, iss. 1, p. 98–109, 2013.
    [BibTeX] [Download PDF]
    @Article{Lopez2013,
    author = {Lopez, Mariana and Pauletto, Sandra and Kearney, Gavin},
    title = {The application of impulse response measurement techniques to the study of the acoustics of {S}tonegate, a performance space used in medieval {E}nglish drama},
    journal = {Acta acustica united with acustica},
    year = {2013},
    volume = {99},
    number = {1},
    pages = {98--109},
    publisher = {S. Hirzel Verlag},
    url = { https://www.ingentaconnect.com/content/dav/aaua/2013/00000099/00000001/art00014 },
    }

  • P. Lunn and A. Hunt, “Phantom signals: Erroneous perception observed during the audification of radio astronomy data.” 2013.
    [BibTeX]
    @InProceedings{Lunn2013,
    author = {P. Lunn and A. Hunt},
    title = {Phantom signals: {E}rroneous perception observed during the audification of radio astronomy data},
    year = {2013},
    organization = {Georgia Institute of Technology},
    }

  • D. Satongar, C. Pike, Y. Lam, and A. I. Tew, “On the influence of headphones on localisation of loudspeaker sources.” 2013, p. 1–18.
    [BibTeX] [Abstract]

    When validating systems that use headphones to synthesise virtual sound sources, a direct comparison between virtual and real sources is sometimes needed. This paper presents objective and subjective measurements of the influence of headphones on external loudspeaker sources. Objective measurements of the effect of a number of headphone models are given and analysed using an auditory filter bank and binaural cue extraction. Objective results highlight that all of the headphones had an effect on localisation cues. A subjective localisation test was undertaken using one of the best performing headphones from the measurements. It was found that the presence of the headphones caused a small increase in localisation error but also that the process of judging source location was different, highlighting a possible increase in the complexity of the localisation task.

    @Conference{Satongar2013,
    author = {D. Satongar and C. Pike and Y. Lam and A. I. Tew},
    title = {On the influence of headphones on localisation of loudspeaker sources},
    year = {2013},
    pages = {1--18},
    month = oct,
    note = {Also published as BBC Research \& Development White Paper WHP 276; 135th AES Convention ; Conference date: 17-10-2013 Through 20-10-2013},
    abstract = {When validating systems that use headphones to synthesise virtual sound sources, a direct comparison between virtual and real sources is sometimes needed. This paper presents objective and subjective measurements of the influence of headphones on external loudspeaker sources. Objective measurements of the effect of a number of headphone models are given and analysed using an auditory filter bank and binaural cue extraction. Objective results highlight that all of the headphones had an effect on localisation cues. A subjective localisation test was undertaken using one of the best performing headphones from the measurements. It was found that the presence of the headphones caused a small increase in localisation error but also that the process of judging source location was different, highlighting a possible increase in the complexity of the localisation task.},
    day = {17},
    }

  • J. Yang and A. Hunt, “Sonic trainer: Real-time sonification of muscular activity and limb positions in general physical exercise,” in Proceedings of ison 2013, 4th interactive sonification workshop, 2013.
    [BibTeX]
    @InProceedings{Yang2013,
    author = {J. Yang and A. Hunt},
    title = {Sonic trainer: {R}eal-time sonification of muscular activity and limb positions in general physical exercise},
    booktitle = {Proceedings of ISon 2013, 4th Interactive Sonification Workshop},
    year = {2013},
    }

2012

  • J. Brereton, D. Murphy, and D. Howard, “A loudspeaker-based room acoustics simulation for real-time musical performance,” in Audio engineering society conference: UK 25th conference: spatial audio in today’s 3D world, 2012.
    [BibTeX]
    @INPROCEEDINGS{Brereton2012-hl,
    title = "A loudspeaker-based room acoustics simulation for real-time
    musical performance",
    booktitle = "Audio Engineering Society Conference: {UK} 25th Conference:
    Spatial Audio in Today's {3D} World",
    author = "Brereton, Jude and Murphy, Damian and Howard, David",
    publisher = "aes.org",
    institution = "Audio Engineering Society",
    year = 2012
    }

  • R. Bresin, T. Hermann, and A. Hunt, Interactive sonification, Springer, 2012.
    [BibTeX]
    @Misc{Bresin2012,
    author = {Bresin, Roberto and Hermann, Thomas and Hunt, Andy},
    title = {Interactive sonification},
    year = {2012},
    publisher = {Springer},
    }

  • H. Daffern and D. M. Howard, “Spectral characteristics of the baroque trumpet: a case study.” 2012, p. 3969–3974.
    [BibTeX] [Abstract]

    The baroque trumpet like many reconstructed or original historical instruments is very difficult to play compared to its modern day equivalent. It is commonly accepted that historically informed performances need a specialist baroque trumpeter performing on a specialist instrument in order to achieve the desired sound. Particularly, there are certain timbral characteristics and expectations concerning the range of dynamics employed that are highlighted by today’s early music conductors and players as being unique to and expected of the baroque trumpet. An opportunity arose to record a world-renowned baroque trumpeter playing original trumpets from 1780, 1788, 1912 and 1967 in the fully (6-sided) acoustic anechoic chamber at the University of York. Due to his strong instinct as a player that the mouthpiece is the most significant timbral characteristic of the instrument, performances on the later two trumpets were recorded using two different mouthpieces. The spectral characteristics of each of the instruments and the impact of changing the mouthpiece are analysed in terms of the spectral correlates of audible differences integral to the sound quality of each.

    @conference{Daffern2012,
    title = "Spectral characteristics of the baroque trumpet: a case study",
    abstract = "The baroque trumpet like many reconstructed or original historical instruments is very difficult to play compared to its modern day equivalent. It is commonly accepted that historically informed performances need a specialist baroque trumpeter performing on a specialist instrument in order to achieve the desired sound. Particularly, there are certain timbral characteristics and expectations concerning the range of dynamics employed that are highlighted by today’s early music conductors and players as being unique to and expected of the baroque trumpet. An opportunity arose to record a world-renowned baroque trumpeter playing original trumpets from 1780, 1788, 1912 and 1967 in the fully (6-sided) acoustic anechoic chamber at the University of York. Due to his strong instinct as a player that the mouthpiece is the most significant timbral characteristic of the instrument, performances on the later two trumpets were recorded using two different mouthpieces. The spectral characteristics of each of the instruments and the impact of changing the mouthpiece are analysed in terms of the spectral correlates of audible differences integral to the sound quality of each.",
    author = "H. Daffern and D. M. Howard",
    year = "2012",
    month = "4",
    day = "27",
    language = "English",
    pages = "3969--3974",
    note = "Proceedings of the Acoustics 2012 ; Conference date: 23-04-2012 Through 27-04-2012",
    }

  • H. Daffern, J. S. Brereton, and D. M. Howard, “The impact of vibrato usage on the perception of pitch in early music compared to opera.” 2012, p. 3949–3954.
    [BibTeX] [Abstract]

    Previous studies on the pitch of long-duration vibrato tones have typically used synthesised modulator tones to assess the perceived pitch in relation to its arithmetic or geometric mean fundamental frequency. In this study a listening test was conducted with expert listener subjects matching recorded vibrato tones sung by professional singers using a method of adjustment and free response paradigm as employed by van Besouw et al. Example tones selected from recordings by 16 singers were used in the test, 8 of whom were employed at the Royal Opera House, Covent Garden, and the remaining singers specialised in Early Music Performance. A previous study by Daffern (2008) of the vibrato usage by these singers shows a noticeable difference in the use of vibrato between these performance groups, particularly in extent throughout long tones. The impact of these differences in vibrato will be assessed in terms of the perception of fundamental frequency in long tones as performed by the two groups of singers, and the effectiveness of using real recordings to assess listener perception of vibrato tones will be discussed.

    @conference{Daffern2012a,
    title = "The impact of vibrato usage on the perception of pitch in early music compared to opera",
    abstract = "Previous studies on the pitch of long-duration vibrato tones have typically used synthesised modulator tones to assess the perceived pitch in relation to its arithmetic or geometric mean fundamental frequency. In this study a listening test was conducted with expert listener subjects matching recorded vibrato tones sung by professional singers using a method of adjustment and free response paradigm as employed by van Besouw et al. Example tones selected from recordings by 16 singers were used in the test, 8 of whom were employed at the Royal Opera House, Covent Garden, and the remaining singers specialised in Early Music Performance. A previous study by Daffern (2008) in the vibrato usage by these singers shows a noticeable use difference in the use of vibrato between these performance groups, particularly in extent throughout long tones. The impact of these differences in vibrato will be assessed in terms of the perception of fundamental frequency in long tones as performed by the two groups of singer, and the effectiveness of using real recordings to assess listener perception of vibrato tones will be discussed.",
    keywords = "vibrato, acoustics, Perception, singing analysis, singing vocal performance",
    author = "H. Daffern and J. S. Brereton and D. M. Howard",
    year = "2012",
    month = "4",
    day = "27",
    language = "English",
    pages = "3949--3954",
    note = "Proceedings of the Acoustics 2012 ; Conference date: 23-04-2012 Through 27-04-2012",
    }

  • M. Gorzel, D. Corrigan, J. Squires, F. Boland, and G. Kearney, “Distance perception in real and virtual environments,” in Audio engineering society conference: uk 25th conference: spatial audio in today’s 3d world, 2012.
    [BibTeX] [Download PDF]
    @Conference{Gorzel2012,
    author = {Gorzel, Marcin and Corrigan, David and Squires, John and Boland, Frank and Kearney, Gavin},
    title = {Distance perception in real and virtual environments},
    booktitle = {Audio Engineering Society Conference: UK 25th Conference: Spatial Audio in Today's 3D World},
    year = {2012},
    month = mar,
    url = {http://www.aes.org/e-lib/browse.cfm?elib=18119},
    }

  • D. M. Howard, H. Daffern, and J. Brereton, “Quantitative voice quality analyses of a soprano singing early music in three different performance styles,” Biomed. signal process. control, vol. 7, iss. 1, p. 58–64, 2012.
    [BibTeX]
    @ARTICLE{Howard2012-mf,
    title = "Quantitative voice quality analyses of a soprano singing early
    music in three different performance styles",
    author = "Howard, David M and Daffern, Helena and Brereton, Jude",
    journal = "Biomed. Signal Process. Control",
    publisher = "Elsevier",
    volume = 7,
    number = 1,
    pages = "58--64",
    month = jan,
    year = 2012,
    keywords = "Soprano voice quality; Electrolaryngograph; Spectrograph;
    Hearing modelling spectrography; Vibrato; Larynx closed
    quotient; Early music"
    }

  • G. Kearney, M. Gorzel, H. Rice, and F. Boland, “Distance perception in interactive virtual acoustic environments using first and higher order ambisonic sound fields,” Acta acustica united with acustica, vol. 98, iss. 1, p. 61–71, 2012.
    [BibTeX] [Download PDF]
    @Article{Kearney2012,
    author = {Kearney, Gavin and Gorzel, Marcin and Rice, Henry and Boland, Frank},
    title = {Distance perception in interactive virtual acoustic environments using first and higher order ambisonic sound fields},
    journal = {Acta Acustica united with Acustica},
    year = {2012},
    volume = {98},
    number = {1},
    pages = {61--71},
    publisher = {S. Hirzel Verlag},
    url = { https://www.researchgate.net/publication/263380387_Distance_Perception_in_Interactive_Virtual_Acoustic_Environments_using_First_and_Higher_Order_Ambisonic_Sound_Fields },
    }

  • C. Masterson, G. Kearney, M. Gorzel, and F. M. Boland, “HRIR order reduction using approximate factorization,” Ieee transactions on audio, speech, and language processing, vol. 20, iss. 6, p. 1808–1817, 2012. doi:10.1109/TASL.2012.2189565
    [BibTeX] [Download PDF]
    @Article{Masterson2012,
    author = {C. Masterson and G. Kearney and M. Gorzel and F. M. Boland},
    title = {{HRIR} order reduction using approximate factorization},
    journal = {IEEE Transactions on Audio, Speech, and Language Processing},
    year = {2012},
    volume = {20},
    number = {6},
    pages = {1808--1817},
    month = aug,
    issn = {1558-7916},
    doi = {10.1109/TASL.2012.2189565},
    keywords = {acoustic signal processing;delays;transient response;HRIR order reduction;approximate factorization;factorization technique;head related impulse responses;close approximation;factorization algorithm;direction independent components;direction dependent components;low reconstruction error;multiple similar local minima;psychoacoustic significance;regularization technique;robust algorithm;minimum phase HRIR data;very short direction-dependent components;initial delay inclusive HRIR data;initial time delay;KEMAR databases;CIPIC databases;subjective listening tests;unfactorized HRIRs;truncated HRIRs;Ambisonic based virtual loudspeaker array;Ear;Time domain analysis;Loudspeakers;Equations;Frequency domain analysis;Delay;Databases;Acoustic signal processing;head related impulse response (HRIR);factorization},
    url = { https://ieeexplore.ieee.org/document/6161609 },
    }

  • J. E. McHugh, G. Kearney, H. Rice, and F. N. Newell, “The sound of the crowd: Auditory information modulates the perceived emotion of a crowd based on bodily expressions.,” Emotion, vol. 12, iss. 1, p. 120, 2012.
    [BibTeX] [Download PDF]
    @Article{McHugh2012,
    author = {J. E. McHugh and G. Kearney and H. Rice and F. N. Newell},
    title = {The sound of the crowd: {A}uditory information modulates the perceived emotion of a crowd based on bodily expressions.},
    journal = {Emotion},
    year = {2012},
    volume = {12},
    number = {1},
    pages = {120},
    publisher = {American Psychological Association},
    url = { https://www.ncbi.nlm.nih.gov/pubmed/21875188},
    }

  • N. Paterson, G. Kearney, K. Naliuka, T. Carrigy, M. Haahr, and F. Conway, “Viking ghost hunt: creating engaging sound design for location-aware applications,” International journal of arts and technology, vol. 6, iss. 1, p. 61–82, 2012. doi:10.1504/IJART.2013.050692
    [BibTeX]
    @Article{Paterson2012,
    author = {Paterson, Natasa and Kearney, Gavin and Naliuka, Katsiaryna and Carrigy, Tara and Haahr, Mads and Conway, Fionnuala},
    title = {Viking ghost hunt: creating engaging sound design for location-aware applications},
    journal = {International Journal of Arts and Technology},
    year = {2012},
    volume = {6},
    number = {1},
    pages = {61--82},
    doi = {10.1504/IJART.2013.050692},
    publisher = {Inderscience Publishers},
    }

  • A. I. Tew, C. T. Hetherington, and J. Thorpe, “Morphoacoustic perturbation analysis: Principles and validation.” 2012, p. 867–872.
    [BibTeX] [Abstract] [Download PDF]

    We present a frequency domain technique for investigating the relationship between acoustic properties of the human hearing system and the morphology responsible for creating them. Exploiting reciprocity, the boundary element method is applied to determine head-related transfer functions (HRTFs) for various directions and distances from a surface mesh model of a head and pinnae. Small orthogonal surface harmonic deformations are applied to the mesh one at a time and stored in a database together with the resulting, approximately linear, changes to the HRTFs (delta HRTFs). Once the computationally intensive process of constructing the database has been completed, identifying the morphological origins of arbitrary acoustic spectral features is very rapid. The method, which we term morphoacoustic perturbation analysis in the frequency domain (MPA-FD), is outlined and a proof-of-principle implementation described. MPA-FD is demonstrated by using it to determine the regions of the pinna responsible for determining the centre frequency of an HRTF notch and a peak. The predictions show good agreement with direct acoustic measurements.

    @Conference{Tew2012,
    author = {A. I. Tew and C. T. Hetherington and J. Thorpe},
    title = {Morphoacoustic perturbation analysis: {P}rinciples and validation},
    year = {2012},
    pages = {867--872},
    month = apr,
    note = {Proceedings of the Acoustics 2012 ; Conference date: 23-04-2012 Through 27-04-2012},
    abstract = {We present a frequency domain technique for investigating the relationship between acoustic properties of the human hearing system and the morphology responsible for creating them. Exploiting reciprocity, the boundary element method is applied to determine head-related transfer functions (HRTFs) for various directions and distances from a surface mesh model of a head and pinnae. Small orthogonal surface harmonic deformations are applied to the mesh one at a time and stored in a database together with the resulting, approximately linear, changes to the HRTFs (delta HRTFs). Once the computationally intensive process of constructing the database has been completed, identifying the morphological origins of arbitrary acoustic spectral features is very rapid. The method, which we term morphoacoustic perturbation analysis in the frequency domain (MPA-FD), is outlined and a proof-of-principle implementation described. MPA-FD is demonstrated by using it to determine the regions of the pinna responsible for determining the centre frequency of an HRTF notch and a peak. The predictions show good agreement with direct acoustic measurements.},
    day = {24},
    keywords = {acoustics, binaural, morphoacoustic, pinna, HRTF},
    url = {https://hal.archives-ouvertes.fr/hal-00811131},
    }

2011

  • J. Brereton, D. Murphy, and D. M. Howard, “Evaluating the auralization of performance spaces and its effect on singing performance,” in Audio engineering society convention 130, 2011.
    [BibTeX]
    @INPROCEEDINGS{Brereton2011-vz,
    title = "Evaluating the Auralization of Performance Spaces and its
    Effect on Singing Performance",
    booktitle = "Audio Engineering Society Convention 130",
    author = "Brereton, Judith and Murphy, Damian and Howard, David M",
    institution = "Audio Engineering Society",
    year = 2011
    }

  • H. Daffern and D. M. Howard, “Acoustic analysis and the identification of performance stress in singing.” 2011.
    [BibTeX]
    @conference{Daffern2011,
    title = "Acoustic analysis and the identification of performance stress in singing",
    author = "H. Daffern and D. M. Howard",
    year = "2011",
    month = "6",
    day = "3",
    language = "English",
    }

  • T. Hermann, A. Hunt, and J. G. Neuhoff, The Sonification Handbook, Logos Verlag Berlin, Germany, 2011.
    [BibTeX]
    @Book{Hermann2011,
    title = {The {S}onification {H}andbook},
    publisher = {Logos Verlag Berlin, Germany},
    year = {2011},
    author = {T. Hermann and A. Hunt and J. G. Neuhoff},
    }

  • M. Lopez, S. Pauletto, and G. Kearney, “Virtual acoustics and performance spaces in medieval English drama,” in Conference on the acoustics of ancient theatres, patras, greece, 2011.
    [BibTeX]
    @InProceedings{Lopez2011,
    author = {M. Lopez and S. Pauletto and G. Kearney},
    title = {Virtual acoustics and performance spaces in medieval {E}nglish drama},
    booktitle = {Conference on The Acoustics of Ancient Theatres, Patras, Greece},
    year = {2011},
    }

2010

  • D. Corrigan, F. Pitié, V. Morris, A. Rankin, M. Linnane, G. Kearney, M. Gorzel, M. O’Dea, C. Lee, and A. Kokaram, “A video database for the development of stereo-3D post-production algorithms,” in 2010 conference on visual media production (cvmp), 2010, p. 64–73.
    [BibTeX]
    @InProceedings{Corrigan2010,
    author = {D. Corrigan and F. Piti{\'e} and V. Morris and A. Rankin and M. Linnane and G. Kearney and M. Gorzel and M. O'Dea and C. Lee and A. Kokaram},
    title = {A video database for the development of stereo-3{D} post-production algorithms},
    booktitle = {2010 Conference on Visual Media Production (CVMP)},
    year = {2010},
    pages = {64--73},
    organization = {IEEE},
    }

  • H. Daffern and D. M. Howard, “Voice source comparison between modern singers of early music and opera,” Logopedics phoniatrics vocology, vol. 35, iss. 2, p. 68–73, 2010. doi:10.3109/14015439.2010.482861
    [BibTeX] [Abstract]

    An experiment was conducted comparing two subject groups, each comprised of eight professional singers specializing in a genre of classical music: early music or grand opera. Electroglottography was used to consider vocal characteristics idiomatic to each genre. Whilst there are clear differences in contact quotient between subjects, particularly when relationships between fundamental frequency (f0) and contact quotient (Qx) are considered, there is no apparent link between contact quotient behaviour and performance specialism based on the results of this sample.

    @article{Daffern2010,
    title = "Voice source comparison between modern singers of early music and opera",
    abstract = "An experiment was conducted comparing two subject groups, each comprised of eight professional singers specializing in a genre of classical music: early music or grand opera. Electroglottography was used to consider vocal characteristics idiomatic to each genre. Whilst there are clear differences in contact quotient between subjects, particularly when relationships between fundamental frequency (f0) and contact quotient (Qx) are considered, there is no apparent link between contact quotient behaviour and performance specialism based on the results of this sample.",
    keywords = "Contact quotient, early music, electrolaryngograph, opera, singing",
    author = "H. Daffern and D. M. Howard",
    year = "2010",
    month = "7",
    doi = "10.3109/14015439.2010.482861",
    language = "English",
    volume = "35",
    pages = "68--73",
    journal = "Logopedics Phoniatrics Vocology",
    issn = "1401-5439",
    publisher = "Informa Healthcare",
    number = "2",
    }
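
    For readers unfamiliar with the contact quotient (Qx) referred to in the abstract above, the short sketch below estimates Qx for a single synthetic electroglottograph cycle by thresholding the normalised waveform. The 30% threshold, the raised-cosine test pulse, and the function name are illustrative assumptions only, not the analysis used in the paper.

    import numpy as np

    def contact_quotient(egg_cycle, threshold=0.3):
        """Estimate contact quotient (Qx) for a single EGG cycle.

        Approximated here as the fraction of the cycle during which the
        normalised EGG amplitude exceeds a fixed threshold; the 30% level
        is an illustrative assumption, not the paper's criterion.
        """
        x = np.asarray(egg_cycle, dtype=float)
        x = (x - x.min()) / (x.max() - x.min())        # normalise to 0..1
        return np.count_nonzero(x > threshold) / x.size

    # Synthetic single cycle: a narrowed raised-cosine "contact" pulse,
    # standing in for real electroglottograph data.
    fs, f0 = 44100, 220.0                              # sample rate (Hz), fundamental (Hz)
    n = int(fs / f0)                                   # samples in one cycle
    t = np.arange(n) / n
    cycle = (0.5 * (1.0 - np.cos(2 * np.pi * t))) ** 3

    print(f"Estimated Qx: {contact_quotient(cycle):.2f}")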

  • A. Edwards, A. Hunt, G. Hines, V. Jackson, A. Podvoiskis, R. Roseblade, and J. Stammers, “Sonification strategies for examination of biological cells.” 2010.
    [BibTeX]
    @InProceedings{Edwards2010,
    author = {Edwards, Alistair and Hunt, Andy and Hines, Genevi{\`e}ve and Jackson, Vanessa and Podvoiskis, Alyte and Roseblade, Richard and Stammers, Jon},
    title = {Sonification strategies for examination of biological cells},
    year = {2010},
    organization = {Georgia Institute of Technology},
    }

  • A. H. Moore, A. I. Tew, and R. Nicol, “An initial validation of individualized crosstalk cancellation filters for binaural perceptual experiments,” J. audio eng. soc, vol. 58, iss. 1/2, p. 36–45, 2010.
    [BibTeX] [Abstract] [Download PDF]

    Crosstalk cancellation provides a means of delivering binaural stimuli to a listener for psychoacoustic research which avoids many of the problems of using headphones in experiments. Using a highly sensitive discrimination paradigm, which addressed a variety of issues in previous, headphone-based experiments, this study aimed to determine whether a system using individual crosstalk cancellation filters can present binaural stimuli that are perceptually indistinguishable from a real sound source. The fast deconvolution with frequency-dependent regularization method was used to design crosstalk cancellation filters. The reproduction loudspeakers were positioned at 90 degrees azimuth, and the synthesized location was 0 degrees azimuth. Eight listeners were tested with noise, click trains, and pulsed tone stimuli. For the pulsed tone stimuli subjects were unable to discriminate between real and virtual sources. For the noise and click stimuli discrimination was marginally above chance, but well below the threshold of detection. That is, weak cues did exist but they were almost completely unreliable. The results suggest that this method of producing individualized crosstalk cancellation filters is suitable for binaural perceptual experiments.

    @Article{Moore2010,
    author = {A. H. Moore and A. I. Tew and R. Nicol},
    title = {An initial validation of individualized crosstalk cancellation filters for binaural perceptual experiments},
    journal = {J. Audio Eng. Soc},
    year = {2010},
    volume = {58},
    number = {1/2},
    pages = {36--45},
    abstract = {Crosstalk cancellation provides a means of delivering binaural stimuli to a listener for psychoacoustic research which avoids many of the problems of using headphones in experiments. Using a highly sensitive discrimination paradigm, which addressed a variety of issues in previous, headphone-based experiments, this study aimed to determine whether a system using individual crosstalk cancellation filters can present binaural stimuli that are perceptually indistinguishable from a real sound source. The fast deconvolution with frequency-dependent regularization method was used to design crosstalk cancellation filters. The reproduction loudspeakers were positioned at 90 degrees azimuth, and the synthesized location was 0 degrees azimuth. Eight listeners were tested with noise, click trains, and pulsed tone stimuli. For the pulsed tone stimuli subjects were unable to discriminate between real and virtual sources. For the noise and click stimuli discrimination was marginally above chance, but well below the threshold of detection. That is, weak cues did exist but they were almost completely unreliable. The results suggest that this method of producing individualized crosstalk cancellation filters is suitable for binaural perceptual experiments.},
    keywords = {audio, binaural, perceptual discrimination},
    url = {http://www.aes.org/e-lib/browse.cfm?elib=15240},
    }
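
    The "fast deconvolution with frequency-dependent regularization" mentioned in the abstract above inverts the 2x2 matrix of loudspeaker-to-ear transfer functions bin by bin, with a regularisation weight limiting filter gain where the plant is poorly conditioned. The sketch below illustrates that kind of regularised inversion only; the random placeholder responses, passband limits, regularisation values, and modelling delay are assumptions, not the filters or parameters used in the study.

    import numpy as np

    def ctc_filters(H, beta, delay):
        """Tikhonov-regularised frequency-domain inversion of a 2x2 plant.

        H     : (n_fft, 2, 2) array, H[k, ear, loudspeaker] per frequency bin.
        beta  : (n_fft,) frequency-dependent regularisation weights.
        delay : modelling delay in samples, keeping the inverse causal.
        Returns C such that H @ C approximates a pure delay times the identity.
        """
        n_fft = H.shape[0]
        d = np.exp(-2j * np.pi * np.arange(n_fft) * delay / n_fft)
        C = np.empty_like(H)
        for k in range(n_fft):
            Hh = H[k].conj().T
            # C_k = (H^H H + beta I)^-1 H^H, scaled by the modelling delay
            C[k] = np.linalg.solve(Hh @ H[k] + beta[k] * np.eye(2), Hh) * d[k]
        return C

    # Placeholder "plant": random decaying impulse responses, not measured HRTFs.
    rng = np.random.default_rng(0)
    n_fft, n_ir, fs = 512, 128, 48000                  # assumed FFT size and sample rate
    h = rng.standard_normal((2, 2, n_ir)) * np.exp(-np.arange(n_ir) / 16.0)
    H = np.fft.fft(h, n_fft, axis=-1).transpose(2, 0, 1)

    # Heavier regularisation outside an assumed 200 Hz - 16 kHz passband.
    f = np.abs(np.fft.fftfreq(n_fft, d=1.0 / fs))
    beta = np.where((f > 200) & (f < 16000), 1e-3, 1e-1)

    C = ctc_filters(H, beta, delay=n_fft // 2)
    c_ir = np.real(np.fft.ifft(C.transpose(1, 2, 0), axis=-1))   # time-domain filters
    print(c_ir.shape)                                            # (2, 2, 512)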

2009

  • H. Daffern, “Vibrato production and its impact on spectral energy in the performance of early music,” in National early music association, 2009.
    [BibTeX]
    @inproceedings{Daffern2009,
    title = "Vibrato production and its impact on spectral energy in the performance of early music",
    author = "H. Daffern",
    year = "2009",
    month = "7",
    day = "8",
    language = "English",
    booktitle = "National Early Music Association",
    }

  • D. M. Howard, J. S. Brereton, and H. Daffern, “Case study of voice quality differences in a soprano singing in different early music performance styles,” in Models and analysis of vocal emissions for biomedical applications – 6th international workshop, maveba 2009, 2009, p. 175–178.
    [BibTeX] [Abstract]

    This paper considers the characteristics of three differing styles of singing early music, as characterized by Richard Bethell [1] of the National Early Music Association, UK. In particular, the sung outputs from a postgraduate soprano who was practiced in singing all three styles are analysed along with the output from an electrolaryngograph which provides data on cycle-by-cycle fundamental variation as well as vocal fold contact area. The results are compared and contrasted with those from a group of early music and opera singers analysed previously.

    @inproceedings{Daffern2009a,
    title = "Case study of voice quality differences in a soprano singing in different early Music performance styles",
    abstract = "This paper considers the characteristics of three differing styles of singing early music, as characterized by Richard Bethell [1] of the National Early Music Association, UK. In particular, the sung outputs from a postgraduate soprano who was practiced in singing all three styles are analysed along with the output from an electrolaryngograph which provides data on cycle-by-cycle fundamental variation as well as vocal fold contact area. The results are compared and contrasted with those from a group of early music and opera singers analysed previously.",
    keywords = "Closed quotient, Early music, Electrolaryngography, Opera, Singing, Voice acoustics, Voice analysis",
    author = "D. M. Howard and J. S. Brereton and H. Daffern",
    year = "2009",
    language = "English",
    isbn = "9788864530963",
    pages = "175--178",
    booktitle = "Models and Analysis of Vocal Emissions for Biomedical Applications - 6th International Workshop, MAVEBA 2009",
    publisher = "Firenze University Press",
    }
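
    As a rough illustration of the cycle-by-cycle fundamental-frequency data an electrolaryngograph can provide, as described in the abstract above, the sketch below measures f0 from successive positive-going threshold crossings of a synthetic waveform. The test signal and crossing criterion are assumptions for illustration rather than the processing used in the study.

    import numpy as np

    def cycle_f0(egg, fs, level=0.0):
        """Per-cycle f0 estimates from positive-going crossings of `level`."""
        x = np.asarray(egg, dtype=float)
        up = np.flatnonzero((x[:-1] < level) & (x[1:] >= level))   # cycle boundaries
        return fs / np.diff(up)                                    # Hz, one value per cycle

    # Synthetic stand-in for a laryngograph waveform: 220 Hz with 5.5 Hz vibrato.
    fs = 44100
    t = np.arange(int(0.5 * fs)) / fs
    phase = 220.0 * t + 3.0 * np.sin(2 * np.pi * 5.5 * t) / (2 * np.pi * 5.5)
    egg = np.sin(2 * np.pi * phase)

    f0 = cycle_f0(egg, fs)
    print(f"{f0.size} cycles, mean f0 {f0.mean():.1f} Hz, "
          f"range {f0.min():.1f}-{f0.max():.1f} Hz")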

2007

  • D. M. Howard, H. Daffern, J. S. Brereton, G. F. Welch, E. Himonides, and A. W. Howard, “A real-time display system for singing development,” The journal of the acoustical society of america, vol. 121, iss. 5, 2007.
    [BibTeX]
    @article{Howard2007,
    title = "A Real-time Display System for Singing Development",
    author = "D. M. Howard and H. Daffern and J. S. Brereton and G. F. Welch and E. Himonides and A. W. Howard",
    year = "2007",
    month = "6",
    day = "5",
    language = "English",
    volume = "121",
    journal = "The Journal of the Acoustical Society of America",
    issn = "0001-4966",
    publisher = "Acoustical Society of America",
    number = "5",
    }

2006

  • H. Daffern, D. M. Howard, and P. Seymour, “Pilot study of the effects of period instruments on the voice.” 2006, p. 43.
    [BibTeX]
    @conference{Daffern2006,
    title = "Pilot study of the effects of period instruments on the voice",
    author = "H. Daffern and D. M. Howard and P Seymour",
    year = "2006",
    month = "5",
    day = "1",
    language = "Undefined/Unknown",
    pages = "43",
    }

  • The 3rd international physiology and acoustics of singing conference, D. M. Howard, J. S. Brereton, and H. Daffern, Eds., School of Arts and Humanities, Institute of Education, 2006.
    [BibTeX]
    @book{Howard2006,
    title = "The 3rd International Physiology and Acoustics of Singing Conference",
    editor = "D. M. Howard and J. S. Brereton and H. Daffern",
    note = "ISBN: 1-905351-04-6",
    year = "2006",
    language = "English",
    publisher = "School of Arts and Humanities, Institute of Education",
    }

2003

  • A. Hunt, M. M. Wanderley, and M. Paradis, “The importance of parameter mapping in electronic instrument design,” Journal of new music research, vol. 32, iss. 4, p. 429–440, 2003.
    [BibTeX]
    @Article{Hunt2003,
    author = {A. Hunt and M. M. Wanderley and M. Paradis},
    title = {The importance of parameter mapping in electronic instrument design},
    journal = {Journal of New Music Research},
    year = {2003},
    volume = {32},
    number = {4},
    pages = {429--440},
    publisher = {Taylor \& Francis},
    }