% BibTeX bibliography database (header scrape artifact removed)
@article{Aaron2013,
  title = {From {{Sonic Pi}} to Overtone: {{Creative}} Musical Experiences with Domain-Specific and Functional Languages},
  author = {Aaron, Samuel and Blackwell, Alan F.},
  year = {2013},
  journal = {Proceedings of the ACM SIGPLAN International Conference on Functional Programming, ICFP},
  pages = {35--46},
  doi = {10.1145/2505341.2505346},
  abstract = {Domain Specific and Functional languages provide an excellent linguistic context for exploring new forms of music notation - not just for formalising compositions but also for live interaction workflows. This experience report describes two novel live coding systems that employ code execution to modify live sounds and music. The first of these systems, Sonic Pi, aims at teaching core computing notions to school students using live-coded music as a means of stimulating and maintaining student engagement. We describe how an emphasis on a functional style improves the ease in which core computer science concepts can be communicated to students. Secondly we describe Overtone, a functional language and live coding environment aimed towards professional electronic musicians. We describe how Overtone's abstractions and architecture strongly benefit from a functional-oriented implementation. Both Sonic Pi and Overtone are freely available open-source platforms.},
  isbn = {9781450323864},
  keywords = {Computational thinking,Live coding,Pedagogy,Raspberry Pi,Sound synthesis},
  file = {/Users/tomoya/Zotero/storage/BDBWTQLL/2505341.2505346.pdf;/Users/tomoya/Zotero/storage/I4Z4L95Y/2505341.2505346.pdf},
  internal-note = {NOTE(review): journal field holds a proceedings title; consider @inproceedings with booktitle -- verify},
}

@article{abbott1981,
  title = {The {{4CED Program}}},
  author = {Abbott, Curtis},
  year = {1981},
  journal = {Computer Music Journal},
  volume = {5},
  number = {1},
  eprint = {3679692},
  eprinttype = {jstor},
  pages = {13--33},
  publisher = {The MIT Press},
  issn = {0148-9267},
  doi = {10.2307/3679692},
  urldate = {2025-01-02},
  file = {/Users/tomoya/Zotero/storage/SEJAZRKR/Abbott - 1981 - The 4CED Program.pdf},
}

@article{adkins2016,
  title = {Post-{{Acousmatic Practice}}: {{Re-evaluating Schaeffer}}'s Heritage},
  shorttitle = {Post-{{Acousmatic Practice}}},
  author = {Adkins, Monty and Scott, Richard and Tremblay, Pierre Alexandre},
  year = {2016},
  month = aug,
  journal = {Organised Sound},
  volume = {21},
  number = {2},
  pages = {106--116},
  issn = {1355-7718, 1469-8153},
  doi = {10.1017/S1355771816000030},
  urldate = {2025-01-21},
  abstract = {This article posits the notion of the post-acousmatic. It considers the work of contemporary practitioners who are indebted to the Schaefferian heritage, but pursue alternative trajectories from the established canonical discourse of acousmatic music. It will outline the authors' definition of the term and also outline a network of elements such as time, rhythm, pitch, dynamics, noise and performance to discuss work that the authors' consider to be a critique, an augmentation and an outgrowth of acousmatic music and thinking.},
  copyright = {https://www.cambridge.org/core/terms},
  language = {en},
  file = {/Users/tomoya/Zotero/storage/9MXHRA65/Adkins et al. - 2016 - Post-Acousmatic Practice Re-evaluating Schaeffer’.pdf},
}

@article{Anderson1990,
  title = {A {{System}} for {{Computer Music Performance}}},
  author = {Anderson, David P. and Kuivila, Ron},
  year = {1990},
  journal = {ACM Transactions on Computer Systems (TOCS)},
  volume = {8},
  number = {1},
  pages = {56--82},
  issn = {15577333},
  doi = {10.1145/77648.77652},
  abstract = {A computer music performance system 1990 is a computer system connected to input devices (including musical keyboards or other instruments) and to graphic and audio output devices. A human performer generates input events using the input devices. The CMPS responds to these events by computing and performing sequences of output actions whose intended timing is determined algorithmically. Because of the need for accurate timing of output actions, the scheduling requirements of a CMPS differ from those of general-purpose or conventional real-time systems. This paper describes the scheduling facilities of FORMULA, a CMPS used by many musicians. In addition to providing accurate timing of output action sequences, FORMULA provides other basic functions useful in musical applications: (1) per-process virtual time systems with independent relationships to real time; (2) process grouping mechanisms and language-level control structures with time-related semantics, and (3) integrated scheduling of tasks (such as compiling and editing) whose real-time constraints are less stringent than those of output action computations. {\copyright} 1990, ACM. All rights reserved.},
  keywords = {Action buffering,deadline scheduling,Design,Human Factors,Languages,message-passing,Performance,process groups,virtual time systems},
  file = {/Users/tomoya/Zotero/storage/627PI276/p56-anderson.pdf;/Users/tomoya/Zotero/storage/PA4GN5XG/p56-anderson.pdf},
}

@misc{astor_gwion_2017,
  title = {Gwion},
  author = {Astor, J{\'e}r{\'e}mie},
  year = {2017},
  urldate = {2022-01-27},
  abstract = {:musical\_note: strongly-timed musical programming language},
  copyright = {GPL-3.0},
  url = {https://github.com/Gwion/Gwion},
  keywords = {audio,chuck,compiler,composition,hacktoberfest,interpreter,lang,language,music,programming-language,real-time,realtime-audio,sound,synth,synthesis},
}

@article{berg1979,
  title = {{{PILE}}: {{A Language}} for {{Sound Synthesis}}},
  shorttitle = {{{PILE}}},
  author = {Berg, Paul},
  year = {1979},
  journal = {Computer Music Journal},
  volume = {3},
  number = {1},
  eprint = {3679754},
  eprinttype = {jstor},
  pages = {30--41},
  publisher = {The MIT Press},
  issn = {0148-9267},
  doi = {10.2307/3679754},
  urldate = {2025-01-01},
  file = {/Users/tomoya/Zotero/storage/H94X4M7S/Berg - 1979 - PILE A Language for Sound Synthesis.pdf},
}

@book{Born1995,
  title = {Rationalizing {{Culture}}},
  author = {Born, Georgina},
  year = {1995},
  number = {1},
  publisher = {University of California Press},
  urldate = {2021-10-10},
  isbn = {0-520-20216-3},
}

@inproceedings{brandt2000,
  title = {Temporal Type Constructors for Computer Music Programming},
  booktitle = {Proceedings of {{International Computer Music Conference}}},
  author = {Brandt, Eli},
  year = {2000},
  urldate = {2020-03-26},
  abstract = {This paper introduces temporal type constructors to computer music programming, and shows how they make languages more expressive. Music programming involves time-structured data types such as audio, MIDI, control signals, and streams of spectral frames. Each computer music language supplies some fixed set of these. Temporal type constructors are instead a way for the programmer to invent these kinds of data, with the ability to manipulate them and their elements. Algorithms expressed in this way can be remarkably brief and clear; FOF (Rodet, 1984) is given as an example.},
  file = {/Users/tomoya/Zotero/storage/4VMZB5ID/full-text.pdf},
}

@inproceedings{Coblenz2018,
  title = {Interdisciplinary {{Programming Language Design}}},
  booktitle = {Proceedings of the 2018 {{ACM SIGPLAN International Symposium}} on {{New Ideas}}, {{New Paradigms}}, and {{Reflections}} on {{Programming}} and {{Software}}},
  author = {Coblenz, Michael and Aldrich, Jonathan and Myers, Brad A and Sunshine, Joshua and Sun, Joshua},
  year = {2018},
  pages = {14},
  publisher = {ACM},
  address = {New York, NY, USA},
  urldate = {2021-06-02},
  abstract = {Approaches for programming language design used commonly in the research community today center around theoretical and performance-oriented evaluation. Recently, researchers have been considering more approaches to language design, including the use of quantitative and qualitative user studies that examine how different designs might affect programmers. In this paper, we argue for an interdisci-plinary approach that incorporates many different methods in the creation and evaluation of programming languages. We argue that the addition of user-oriented design techniques can be helpful at many different stages in the programming language design process. CCS Concepts {$\bullet$} Software and its engineering {$\rightarrow$} General programming languages; {$\bullet$} Social and professional topics {$\rightarrow$} History of programming languages;},
  isbn = {978-1-4503-6031-9},
  keywords = {programming language design,programming language evaluation,user-centered design},
  file = {/Users/tomoya/Zotero/storage/FIGKGQMH/Muller2020 (日本語).pdf;/Users/tomoya/Zotero/storage/IPSD32MQ/Coblenz et al. - 2018 - Interdisciplinary Programming Language Design(2).pdf;/Users/tomoya/Zotero/storage/P4SPTMMF/full-text.pdf},
  internal-note = {NOTE(review): author {Sun, Joshua} looks like an export duplicate of {Sunshine, Joshua}; pages={14} looks like a page count, not a range -- verify against the published paper},
}

@article{Dannenberg2018,
  title = {Languages for {{Computer Music}}},
  author = {Dannenberg, Roger B.},
  year = {2018},
  month = nov,
  journal = {Frontiers in Digital Humanities},
  volume = {5},
  issn = {2297-2668},
  doi = {10.3389/fdigh.2018.00026},
  urldate = {2019-04-28},
  file = {/Users/tomoya/Zotero/storage/L7F62UTL/Languages_for_Computer_Music.pdf;/Users/tomoya/Zotero/storage/QVM64Q68/Dannenberg - 2018 - Languages for Computer Music.pdf},
}

@inproceedings{Dannenberg2018ugg,
  title = {{{UGG}}: {{A Unit Generator Generator}}},
  booktitle = {Proceedings of the 2018 {{International Computer Music Conference}}},
  author = {Dannenberg, Roger B},
  year = {2018},
  month = aug,
  urldate = {2020-02-28},
  abstract = {Unit generators are primary building blocks of music audio software. Unit generators aim to be both efficient and flexible, but these goals are often in opposition. As designers trade off efficiency against flexibility, many designs emerge, leading to a multitude of incompatible implementations. Thus, there are many incompatible unit generator libraries, each representing substantial effort. The present work suggests that unit generators can be written in a functional style using a conventional language with operator overloading, and an easily modifiable "back end" can generate efficient code. A prototype of this method, the Unit Generator Generator (UGG) system can be tailored quickly to target many unit generator designs. Computations can be shared across unit generators by defining simple functions, leading to an even more compact and expressive notation.},
  file = {/Users/tomoya/Zotero/storage/7PHU8UVW/full-text.pdf},
}

@article{davis_very_1994,
  title = {Very Early Computer Music},
  author = {Davis, Donald},
  year = {1994},
  journal = {Resurrection The Bulletin of the Computer Conservation Society},
  volume = {10},
  pages = {19--20},
  issn = {09587403},
  urldate = {2022-01-02},
  file = {/Users/tomoya/Zotero/storage/CJHRSC7D/res10.pdf},
}

@article{doornbusch2017,
  title = {Early {{Computer Music Experiments}} in {{Australia}} and {{England}}},
  author = {Doornbusch, Paul},
  year = {2017},
  month = aug,
  journal = {Organised Sound},
  volume = {22},
  number = {2},
  pages = {297--307},
  publisher = {Cambridge University Press},
  issn = {1355-7718},
  doi = {10.1017/S1355771817000206},
  urldate = {2020-03-31},
  abstract = {{$<$}p{$>$} This article documents the early experiments in both Australia and England to make a computer play music. The experiments in England with the Ferranti Mark 1 and the Pilot ACE (practically undocumented at the writing of this article) and those in Australia with CSIRAC (Council for Scientific and Industrial Research Automatic Computer) are the oldest known examples of using a computer to play music. Significantly, they occurred some six years before the experiments at Bell Labs in the USA. Furthermore, the computers played music in real time. These developments were important, and despite not directly leading to later highly significant developments such as those at Bell Labs under the direction of Max Mathews, these forward-thinking developments in England and Australia show a history of computing machines being used musically since the earliest development of those machines. \textsuperscript{1} {$<$}/p{$>$}},
  file = {/Users/tomoya/Zotero/storage/2VKKHWML/full-text.pdf},
}

@book{emerson2014,
  title = {Reading {{Writing Interfaces}}: {{From}} the {{Digital}} to the {{Bookbound}}},
  author = {Emerson, Lori},
  year = {2014},
  month = nov,
  publisher = {Univ of Minnesota Press},
  isbn = {978-0-8166-9126-5},
}

@inproceedings{holbrook2022,
  title = {Computer Music and Post-Acousmatic Practices: {{International Computer Music Conference}} 2022},
  shorttitle = {Computer Music and Post-Acousmatic Practices},
  booktitle = {Proceedings of the {{International Computer Music Conference}}, {{ICMC}} 2022},
  author = {Holbrook, Ulf and Rudi, Joran},
  editor = {Torre, Giuseppe},
  year = {2022},
  month = jul,
  series = {International {{Computer Music Conference}}, {{ICMC Proceedings}}},
  pages = {140--144},
  publisher = {International Computer Music Association},
  address = {San Francisco},
  urldate = {2024-12-11},
  abstract = {This short paper considers the practices of computer music through a perspective of the post-acousmatic. As the majority of music is now made using computers, the question emerges: How relevant are the topics, methods, andconventions from the ``historical'' genre of computer music? Originally an academic genre confined to large mainframes, computer music's tools and conventions have proliferated and spread to all areas of music-making. As agenre steeped in technological traditions, computer music is often primarily concerned with the technologies of its own making, and in this sense isolated from the social conditions of musical practice. The post-acousmatic is offeredas a methodological perspective to understand technology based music, its histories, and entanglements.},
  keywords = {Computer music,Post-Acousmatic Practice},
  file = {/Users/tomoya/Zotero/storage/NBRFF5ND/Holbrook et al. - Computer music and post-acousmatic practices.pdf},
}

@article{innis_sound_1968,
  title = {Sound {{Synthesis}} by {{Computer}}: {{Musigol}}, a {{Program Written Entirely}} in {{Extended Algol}}},
  shorttitle = {Sound {{Synthesis}} by {{Computer}}},
  author = {Innis, Donald Mac},
  year = {1968},
  journal = {Perspectives of New Music},
  volume = {7},
  number = {1},
  eprint = {832426},
  eprinttype = {jstor},
  pages = {66--79},
  publisher = {Perspectives of New Music},
  issn = {0031-6016},
  doi = {10.2307/832426},
  urldate = {2022-01-04},
  file = {/Users/tomoya/Zotero/storage/DYXDF5EH/Innis - 1968 - Sound Synthesis by Computer Musigol, a Program Wr.pdf},
  internal-note = {NOTE(review): author name split looks wrong; surname is possibly {MacInnis, Donald} -- verify against the journal},
}

@misc{kay2019,
  title = {American Computer Pioneer {{Alan Kay}}'s Concept, the {{Dynabook}}, Was Published in 1972. {{How}} Come {{Steve Jobs}} and {{Apple iPad}} Get the Credit for Tablet Invention?},
  author = {Kay, Alan C.},
  year = {2019},
  month = apr,
  journal = {Quora},
  urldate = {2022-01-25},
  abstract = {Answer (1 of 4): The Dynabook idea happened in 1968. But the simple part of the idea --- a personal computer on the back of a flat screen display with a stylus and touch sensitivity --- is hard to consider a real invention given: * Flat-screen displays. I saw the first University of Illinois one i...},
  url = {https://www.quora.com/American-computer-pioneer-Alan-Kay-s-concept-the-Dynabook-was-published-in-1972-How-come-Steve-Jobs-and-Apple-iPad-get-the-credit-for-tablet-invention},
  language = {en},
  file = {/Users/tomoya/Zotero/storage/52TPMQQG/American-computer-pioneer-Alan-Kay-s-concept-the-Dynabook-was-published-in-1972-How-come-Steve-.html},
}

@inproceedings{kirkbride2016foxdot,
  title = {{{FoxDot}}: {{Live}} Coding with Python and Supercollider},
  booktitle = {Proceedings of the {{International Conference}} on {{Live Interfaces}}},
  author = {Kirkbride, Ryan},
  year = {2016},
  pages = {194--198},
}

@misc{lan_glicol_2020,
  title = {Glicol},
  author = {Lan, Qichao},
  year = {2020},
  urldate = {2025-01-28},
  url = {https://glicol.org/},
  file = {/Users/tomoya/Zotero/storage/9DZAAT5M/glicol.org.html},
}

@inproceedings{Lattner,
  title = {{{LLVM}}: {{A Compilation Framework}} for {{Lifelong Program Analysis}} \& {{Transformation}}},
  booktitle = {Proceedings of the {{International Symposium}} on {{Code Generation}} and {{Optimization}}: {{Feedback-Directed}} and {{Runtime Optimization}}},
  author = {Lattner, Chris and Adve, Vikram},
  year = {2004},
  pages = {75},
  publisher = {IEEE Computer Society},
  urldate = {2019-05-29},
  abstract = {This paper describes LLVM (Low Level Virtual Machine),a compiler framework designed to support transparent, lifelongprogram analysis and transformation for arbitrary programs,by providing high-level information to compilertransformations at compile-time, link-time, run-time, and inidle time between runs.LLVM defines a common, low-levelcode representation in Static Single Assignment (SSA) form,with several novel features: a simple, language-independenttype-system that exposes the primitives commonly used toimplement high-level language features; an instruction fortyped address arithmetic; and a simple mechanism that canbe used to implement the exception handling features ofhigh-level languages (and setjmp/longjmp in C) uniformlyand efficiently.The LLVM compiler framework and coderepresentation together provide a combination of key capabilitiesthat are important for practical, lifelong analysis andtransformation of programs.To our knowledge, no existingcompilation approach provides all these capabilities.We describethe design of the LLVM representation and compilerframework, and evaluate the design in three ways: (a) thesize and effectiveness of the representation, including thetype information it provides; (b) compiler performance forseveral interprocedural problems; and (c) illustrative examplesof the benefits LLVM provides for several challengingcompiler problems.\vphantom\{\}, booktitle = \{Proceedings of the International Symposium on Code Generation and Optimization: Feedback-Directed and Runtime Optimization\vphantom\}},
  file = {/Users/tomoya/Zotero/storage/6F75AM3H/full-text.pdf},
}

@article{Lazzarini2013,
  title = {The {{Development}} of {{Computer Music Programming Systems}}},
  author = {Lazzarini, Victor},
  year = {2013},
  journal = {Journal of New Music Research},
  volume = {42},
  number = {1},
  pages = {97--110},
  publisher = {Victor Lazzarini},
  issn = {1744-5027},
  doi = {10.1080/09298215.2013.778890},
  urldate = {2020-04-06},
  abstract = {This article traces the history and evolution of Music Programming , from the early off-line synthesis programs of the MUSIC N family to modern realtime interactive systems. It explores the main design characteristics of these systems and their impact on Computer Music. In chronological fashion, the article will examine, with code examples, the development of the early systems into the most common modern languages currently in use. In particular, we will focus on Csound, highlighting its main internal aspects and its applications. The text will also explore the various paradigms that have oriented the design and use of music programming systems. This discussion is completed by a consideration of computer music ecosystems and their pervasiveness in today's practice.},
  file = {/Users/tomoya/Zotero/storage/GKN9EIMD/full-text.pdf;/Users/tomoya/Zotero/storage/VGL4U7D9/Lazzarini - 2013 - The Development of Computer Music Programming Systems.pdf},
  internal-note = {NOTE(review): publisher={Victor Lazzarini} looks like an export artifact for a journal article -- verify},
}

@book{levy_hackers_2010,
  title = {Hackers: {{Heroes}} of the {{Computer Revolution}} - 25th {{Anniversary Edition}}},
  shorttitle = {Hackers},
  author = {Levy, Steven},
  year = {2010},
  month = may,
  edition = {1st edition},
  publisher = {O'Reilly Media},
  abstract = {This 25th anniversary edition of Steven Levy's classic book traces the exploits of the computer revolution's original hackers -- those brilliant and eccentric nerds from the late 1950s through the early '80s who took risks, bent the rules, and pushed the world in a radical new direction. With updated material from noteworthy hackers such as Bill Gates, Mark Zuckerberg, Richard Stallman, and Steve Wozniak, Hackers is a fascinating story that begins in early computer research labs and leads to the first home computers.Levy profiles the imaginative brainiacs who found clever and unorthodox solutions to computer engineering problems. They had a shared sense of values, known as "the hacker ethic," that still thrives today. Hackers captures a seminal period in recent history when underground activities blazed a trail for today's digital world, from MIT students finagling access to clunky computer-card machines to the DIY culture that spawned the Altair and the Apple II.},
  language = {English},
}

@article{loy_life_2013,
  title = {Life and {{Times}} of the {{Samson Box}}},
  author = {Loy, D. Gareth},
  year = {2013},
  journal = {Computer Music Journal},
  volume = {37},
  number = {3},
  eprint = {24265512},
  eprinttype = {jstor},
  pages = {26--48},
  publisher = {The MIT Press},
  issn = {0148-9267},
  urldate = {2022-01-05},
  abstract = {Peter Samson designed and built a real-time signal-processing computer for music applications in the 1970s. The Systems Concepts Digital Synthesizer ("Samson Box" for short) was installed at the Center for Computer Research in Music and Acoustics (CCRMA) at Stanford University in 1977, where it served for over a decade as the principal music generation system. It was an important landmark in the transition from general-purpose computers to real-time systems for music and audio, and helped set the stage for the sea change in the music industry from analog to digital technologies that began in the 1980s and continues at a rapid pace today. This article focuses on the historical context of the Samson Box, its development, its impact on the culture of CCRMA and the Stanford Artificial Intelligence Laboratory, its use for music research and composition at Stanford, and its role in the transformation of the music and audio industries from analog to digital practices. A list of compositions realized on the Samson Box is included, which shows that from 1978 to its decommissioning in 1992 it was used to create over 100 finished works, many of which were widely performed and were awarded prizes. A companion article provides a detailed architectural review and an interview with Pete Samson.},
  file = {/Users/tomoya/Zotero/storage/4VBXXUJ5/Loy - 2013 - Life and Times of the Samson Box.pdf},
}

@article{loy1985,
  title = {Programming Languages for Computer Music Synthesis, Performance, and Composition},
  author = {Loy, Gareth and Abbott, Curtis},
  year = {1985},
  month = jun,
  journal = {ACM Comput. Surv.},
  volume = {17},
  number = {2},
  pages = {235--265},
  issn = {0360-0300},
  doi = {10.1145/4468.4485},
  urldate = {2025-01-01},
  abstract = {The development of formal, descriptive, and procedural notations has become a practical concern within the field of music now that computers are being applied to musical tasks. Music combines the real-time demands of performance with the intellectual demands of highly developed symbolic systems that are quite different from natural language. The richness and variety of these demands makes the programming language paradigm a natural one in the musical application of computers. This paradigm provides musicians with a fresh perspective on their work. At the same time, music is a very advanced form of human endeavor, making computer music applications a worthy challenge for computer scientists. In this paper we outline the traditional tasks and forms of representation in music, then proceed with a survey of languages that deal with music programming.},
  file = {/Users/tomoya/Zotero/storage/N4NELPL9/Loy and Abbott - 1985 - Programming languages for computer music synthesis.pdf},
}

@inproceedings{lyon_we_2006,
  title = {Do {{We Still Need Computer Music}}?},
  booktitle = {{{EMS}}},
  author = {Lyon, Eric},
  year = {2006},
  urldate = {2025-01-17},
  file = {/Users/tomoya/Zotero/storage/SK2DXEE8/Do_We_Still_Need_Computer_Music.pdf},
}

@article{lyon2002,
  title = {Dartmouth {{Symposium}} on the {{Future}} of {{Computer Music Software}}: {{A Panel Discussion}}},
  shorttitle = {Dartmouth {{Symposium}} on the {{Future}} of {{Computer Music Software}}},
  author = {Lyon, Eric and Mathews, Max and McCartney, James and Zicarelli, David and Vercoe, Barry and Loy, Gareth and Puckette, Miller},
  year = {2002},
  journal = {Computer Music Journal},
  volume = {26},
  number = {4},
  eprint = {3681766},
  eprinttype = {jstor},
  pages = {13--30},
  publisher = {The MIT Press},
  issn = {0148-9267},
  urldate = {2025-01-01},
}

@article{Magnusson2009,
  title = {Of Epistemic Tools: {{Musical}} Instruments as Cognitive Extensions},
  author = {Magnusson, Thor},
  year = {2009},
  month = aug,
  journal = {Organised Sound},
  volume = {14},
  number = {2},
  pages = {168--176},
  issn = {13557718},
  doi = {10.1017/S1355771809000272},
  urldate = {2021-03-17},
  abstract = {This paper explores the differences in the design and performance of acoustic and new digital musical instruments, arguing that with the latter there is an increased encapsulation of musical theory. The point of departure is the phenomenology of musical instruments, which leads to the exploration of designed artefacts as extensions of human cognition - as scaffolding onto which we delegate parts of our cognitive processes. The paper succinctly emphasises the pronounced epistemic dimension of digital instruments when compared to acoustic instruments. Through the analysis of material epistemologies it is possible to describe the digital instrument as an epistemic tool: a designed tool with such a high degree of symbolic pertinence that it becomes a system of knowledge and thinking in its own terms. In conclusion, the paper rounds up the phenomenological and epistemological arguments, and points at issues in the design of digital musical instruments that are germane due to their strong aesthetic implications for musical culture. {\copyright} 2009 Cambridge University Press.},
  file = {/Users/tomoya/Zotero/storage/9SUU6WCD/magnusson.pdf;/Users/tomoya/Zotero/storage/HJFNX6AG/magnusson.pdf},
}

@article{Magnusson2011,
  title = {The {{IXI Lang}}: {{A SuperCollider Parasite}} for {{Live Coding}}},
  author = {Magnusson, Thor},
  year = {2011},
  journal = {International Computer Music Conference Proceedings},
  volume = {2011},
  publisher = {Michigan Publishing, University of Michigan Library},
  issn = {2223-3881},
  urldate = {2020-03-20},
  internal-note = {NOTE(review): proceedings paper typed as @article; consider @inproceedings with booktitle -- verify},
}

@article{Markstrum2010,
  title = {Staking Claims: {{A}} History of Programming Language Design Claims and Evidence: {{A}} Positional Work in Progress},
  author = {Markstrum, Shane},
  year = {2010},
  journal = {Evaluation and Usability of Programming Languages and Tools, PLATEAU'10},
  doi = {10.1145/1937117.1937124},
  urldate = {2021-10-18},
  abstract = {While still a relatively young field, computer science has a vast body of knowledge in the domain of programming languages. When a new language is introduced, its designers make claims which distinguish their language from previous languages. However, it often feels like language designers do not feel a pressing need to back these claims with evidence beyond personal anecdotes. Peer reviewers are likely to agree. In this paper, we present preliminary work which revisits the history of such claims by examining a number of language design papers which span the history of programming language development. We focus on the issue of claim-evidence correspondence, or determining how often claims are or are not backed by evidence. These preliminary results confirm that unsupported claims have been around since the inception of higher level programming in the 1950s. We stake a position that this behavior is unacceptable for the health of the research community. We should be more aware of valiant and effective efforts for supplying evidence to support language design claims. {\copyright} 2010 ACM.},
  keywords = {Claim-evidence correspondence,Language design,Language properties,Metrics},
  file = {/Users/tomoya/Zotero/storage/97KEXH5Z/Markstrum - 2010 - Staking claims A history of programming language design claims and evidence A positional work in progress.pdf;/Users/tomoya/Zotero/storage/B5MFLAXF/full-text.pdf},
}

@article{mathews_acoustic_1961,
  title = {An Acoustic Compiler for Music and Psychological Stimuli},
  author = {Mathews, Max V.},
  year = {1961},
  month = may,
  journal = {The Bell System Technical Journal},
  volume = {40},
  number = {3},
  pages = {677--694},
  issn = {0005-8580},
  doi = {10.1002/j.1538-7305.1961.tb03237.x},
  abstract = {A program for synthesizing music and psychological stimuli on a digital computer is described. The sound is produced by three operations: (a) A compiler generates the programs for a set of instruments. (b) These instruments are ``played'' by a sequencing program at the command of a sequence of ``note'' cards which contain information analogous to that given by conventional music notes. (c) The computer output, in the form of numbers on a digital magnetic tape, is converted to audible sound by a digital-to-analog converter, a desampling filter, and a loudspeaker. By virtue of the general nature of the compiling program a great variety of instruments may be produced, and the instrument programs are quite efficient in terms of computer time. The ``note'' cards are arranged to minimize the effort necessary to specify a composition. Preliminary compositions indicate that exceedingly interesting music and useful psychological stimuli can be generated.},
  file = {/Users/tomoya/Zotero/storage/IHLKBB9C/Mathews - 1961 - An acoustic compiler for music and psychological s.pdf;/Users/tomoya/Zotero/storage/CRSTYZYX/6773634.html},
}

@misc{mathews_max_2007,
  title = {Max {{Mathews Full Interview}} {\textbar} {{NAMM}}.Org},
  author = {Mathews, Max V.},
  year = {2007},
  month = mar,
  urldate = {2025-01-08},
  abstract = {Max Mathews was working as an engineer at the famed Bell Laboratory in 1954 when he was asked to determine if the computer Bell was designing could create music. The landmark Music 2 and later Music 4 projects put the two concepts together as early as 1957---the computer and music had a future and Max was there for the birth. Max had moved on to musical programming when Don Buchla and Robert Moog created similar electronic music in the form of the synthesizer.},
  url = {https://www.namm.org/video/orh/max-mathews-full-interview},
  language = {en},
  file = {/Users/tomoya/Zotero/storage/F9CN88YP/max-mathews-full-interview.html},
}

@book{mathews_technology_1969,
title = {The Technology of Computer Music},
author = {Mathews, Max V. and Miller, Joan E.},
year = {1969},
publisher = {M.I.T. Press},
urldate = {2020-03-31},
isbn = {0-262-13050-5},
keywords = {Computer composition sound processing}
}

@article{mathews1963,
title = {The {{Digital Computer}} as a {{Musical Instrument}}},
author = {Mathews, M. V.},
year = {1963},
month = nov,
journal = {Science, New Series},
volume = {142},
number = {3592},
eprint = {1712380},
eprinttype = {jstor},
pages = {553--557},
abstract = {A computer can be programmed to play "instrumental" music, to aid the composer, or to compose unaided. M. V. Mathews With the aid of suitable output equipment , the numbers which a modern digital computer generates can be directly converted to sound waves. The process is completely general, and any perceiv-able sound can be so produced. This potentiality of the computer has been of considerable use at the Bell Telephone Laboratories in generating stimuli for experiments in the field of hearing, and for generating speech sounds and connected speech in investigations of the factors which contribute to the intelligibility and naturalness of speech. The quality of sound is of great importance in two fields-that of speech and communication and that of music. Our studies at the Bell Laboratories in the first of these fields have led us, over the past few years, to related studies in the production of musical sounds and their organization into musical compositions. I believe that this by-product of our work on speech and hearing may be of considerable value in the world of music, and that further work in this direction will be of substantial value in furthering our understanding of psychoacoustics. There are no theoretical limitations to the performance of the computer as a source of musical sounds, in contrast to the performance of ordinary instruments. At present, the range of computer music is limited principally by cost and by our knowledge of psycho-acoustics. These limits are rapidly receding. In addition to generating sound, the computer can also function as a machine for composing music. It can either compose pieces based entirely on random numbers generated by itself or it can cooperate with a human composer. It can play its own compositions. Here I first describe the process for converting numbers to sounds, then I describe a program for playing music. 
Next I consider a psychoacoustic problem which is typical of those posed in attempts to make more interesting sounds. Finally, I look to the future, to the time when the computer is itself the composer. Sound from Numbers How can the numbers with which a computer deals be converted into sounds the ear can hear? The most general conversion is based upon the use of the numbers as samples of the sound pressure wave. A schematic diagram of this process is shown in Fig. 1. Here a sequence of numbers from the computer is put into an analog-to-digital converter, which generates a se-' quence of electric pulses whose amplitudes are proportional to the numbers. These pulses are smoothed with a filter and then converted to a sound wave by means of an ordinary loudspeaker. Intuitively, we feel that if a high enough pulse rate is used and the amplitudes of the pulses are generated with sufficient precision, then any sound wave can be closely approximated by this process. Mathematically, it has been established (1) that this conclusion is correct. A sound wave with frequencies from 0 to B cycles per second can be generated from a sequence of two B pulses per second. Thus, for example, by running our computer at a rate of 30,000 numbers per second, we can generate sound waves with frequencies from 0 to 15,000 cycles per second. Waves in this frequency range are about the only ones the human ear can perceive. The signal-to-quantizing-noise ratio of the sound wave depends on the accuracy with which the amplitudes of the pulses are represented. Computers deal with a finite number of digits and, hence, have limited accuracy. However, the computer limits are more than sufficient acoustically. 
For example, amplitudes represented by four-digit decimal numbers, are accurate to within 1 part in 10,000, an accuracy which represents a signal-to-noise ratio of 80 decibels; this is less noise than the ear can hear, and less noise than would be introduced by any audio equipment, such as the best tape recorder. The sampling process just described is theoretically unrestricted, but the generation of sound signals requires very high sampling rates. The question should immediately be asked, "Are computers of the type now available capable of generating numbers at these rates?" The answer is "Yes," with some qualifications. A high-speed machine such as the I.B.M. 7090, using the programs described later in this article, can compute only about 5000 numbers per second When generating a reasonably complex sound. However, the numbers can be temporarily stored on one of the computer's digital magnetic tapes, and this tape can subsequently be replayed at rates up to 30,000 numbers per second (each number being a 12-bit binary number). Thus, the computer is capable of generating wideband musical sounds. Because of the cost of computer time, we often limit our studies to those for which the computer is run at lower rates, such as 10,000 numbers per second a rate which yields a bandwidth of 5000 cycles per second. The direct conversion of numbers to sound is only one of the ways in which the computer can generate sounds. An alternate procedure is to use the numbers from the computer to control electronic apparatus such as oscillators and filters, which, in turn, generate the sounds. These processes have been carried out by the Radio Corporation of America music synthesizer (2) and by a machine constructed at the University of Illinois (3). This procedure has the advantage that a much lower rate 553},
file = {/Users/tomoya/Zotero/storage/PG6GGQCV/Mathews - 1963 - The Digital Computer as a Musical Instrument.pdf}
}

@article{Mathews1980,
title = {Interview with {{Max Mathews}}},
author = {Mathews, Max and Roads, C},
year = {1980},
journal = {Computer Music Journal},
volume = {4},
number = {4},
pages = {15--22},
publisher = {The MIT Press},
urldate = {2020-04-03},
file = {/Users/tomoya/Zotero/storage/GFPCD4VD/full-text.pdf;/Users/tomoya/Zotero/storage/ZAQ37PDB/Mathews, Roads - 1980 - Interview with Max Mathews.pdf}
}

@inproceedings{matsuura_lambda-mmm_2024,
title = {Lambda-Mmm: The {{Intermediate Representation}} for {{Synchronous Signal Processing Language Based}} on {{Lambda Calculus}}},
booktitle = {Proceedings of the 4th {{International Faust Conference}}},
author = {Matsuura, Tomoya},
year = {2024},
pages = {17--25},
abstract = {This paper proposes {$\lambda$}mmm, a call-by-value, simply typed lambda calculus-based intermediate representation for a music programming language that handles synchronous signal processing and introduces a virtual machine and instruction set to execute {$\lambda$}mmm. Digital signal processing is represented by a syntax that incorporates the internal states of delay and feedback into the lambda calculus. {$\lambda$}mmm extends the lambda calculus, allowing users to construct generative signal processing graphs and execute them with consistent semantics. However, a challenge arises when handling higher-order functions because users must determine whether execution occurs within the global environment or during DSP execution. This issue can potentially be resolved through multi-stage computation.},
copyright = {All rights reserved},
isbn = {978-2-9597911-0-9},
language = {en},
file = {/Users/tomoya/Zotero/storage/X9PF87WL/Matsuura - 2024 - Lambda-mmm the Intermediate Representation for Sy.pdf}
}

@inproceedings{matsuura_mimium_2021,
title = {Mimium: {{A Self-Extensible Programming Language}} for {{Sound}} and {{Music}}},
shorttitle = {Mimium},
booktitle = {Proceedings of the 9th {{ACM SIGPLAN International Workshop}} on {{Functional Art}}, {{Music}}, {{Modelling}}, and {{Design}}},
author = {Matsuura, Tomoya and Jo, Kazuhiro},
year = {2021},
month = aug,
series = {{{FARM}} 2021},
pages = {1--12},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
doi = {10.1145/3471872.3472969},
urldate = {2024-07-09},
abstract = {We propose a programming language for music named mimium, which combines temporal-discrete control and signal processing in a single language. mimium has an intuitive imperative syntax and can use stateful functions as Unit Generator in the same way as ordinary function definitions and applications. Furthermore, the runtime performance is made equivalent to that of lower-level languages by compiling the code through the LLVM compiler infrastructure. By using the strategy of adding a minimum number of features for sound to the design and implementation of a general-purpose functional language, mimium is expected to lower the learning cost for users, simplify the implementation of compilers, and increase the self-extensibility of the language. In this paper, we present the basic language specification, semantics for simple task scheduling, the semantics for stateful functions, and the compilation process. mimium has certain specifications that have not been achieved in existing languages. Future works suggested include extending the compiler functionality to combine task scheduling with the functional paradigm and introducing multi-stage computation for parametric replication of stateful functions.},
copyright = {All rights reserved},
isbn = {978-1-4503-8613-5},
file = {/Users/tomoya/Zotero/storage/ERG4LFIZ/Matsuura and Jo - 2021 - mimium A Self-Extensible Programming Language for.pdf;/Users/tomoya/Zotero/storage/TDBLJQTL/Matsuura and Jo - 2021 - mimium a self-extensible programming language for.pdf}
}

@inproceedings{mccartney_supercollider_1996,
title = {{{SuperCollider}}, a {{New Real Time Synthesis Language}}},
booktitle = {International {{Computer Music Conference Proceedings}}},
author = {McCartney, James},
year = {1996},
publisher = {Michigan Publishing},
issn = {2223-3881},
urldate = {2021-10-12},
file = {/Users/tomoya/Zotero/storage/5WDUN5YL/supercollider-a-new-real-time-synthesis-language.pdf}
}

@article{McCartney2002,
title = {Rethinking the Computer Music Language: {{SuperCollider}}},
author = {McCartney, James},
year = {2002},
month = dec,
journal = {Computer Music Journal},
volume = {26},
number = {4},
pages = {61--68},
publisher = {The MIT Press},
issn = {0148-9267},
doi = {10.1162/014892602320991383},
urldate = {2020-04-03},
file = {/Users/tomoya/Zotero/storage/FEZPR5S9/014892602320991383.pdf;/Users/tomoya/Zotero/storage/ZZI9GFWR/014892602320991383.pdf}
}

@inproceedings{McLean2014,
title = {Making Programming Languages to Dance to: {{Live}} Coding with Tidal},
booktitle = {{{FARM}} 2014 - {{Proceedings}} of the 2014 {{ACM SIGPLAN International Workshop}} on {{Functional Art}}, {{Music}}, {{Modelling}} and {{Design}}},
author = {McLean, Alex},
year = {2014},
pages = {63--70},
publisher = {Association for Computing Machinery},
address = {New York, New York, USA},
doi = {10.1145/2633638.2633647},
urldate = {2020-06-08},
abstract = {Live coding of music has grown into a vibrant international community of research and practice over the past decade, providing a new research domain where computer science blends with the performing arts. In this paper the domain of live coding is described, with focus on the programming language design challenges involved, and the ways in which a functional approach can meet those challenges. This leads to the introduction of Tidal 0.4, a Domain Specific Language embedded in Haskell. This is a substantial restructuring of Tidal, which now represents musical pattern as functions from time to events, inspired by Functional Reactive Programming. {\copyright} 2014 ACM.},
isbn = {978-1-4503-3039-8},
keywords = {domain specific languages,live coding,music},
file = {/Users/tomoya/Zotero/storage/CGJJMBCY/2633638.2633647.pdf;/Users/tomoya/Zotero/storage/JRHVITZG/2633638.2633647.pdf}
}

@article{McPherson2020,
title = {Idiomatic {{Patterns}} and {{Aesthetic Influence}} in {{Computer Music Languages}}},
author = {McPherson, Andrew and Tah{\i}ro{\u g}lu, Koray},
year = {2020},
journal = {Organised Sound},
volume = {25},
number = {1},
pages = {53--63},
issn = {1469-8153},
doi = {10.1017/S1355771819000463},
abstract = {It is widely accepted that acoustic and digital musical instruments shape the cognitive processes of the performer on both embodied and conceptual levels, ultimately influencing the structure and aesthetics of the resulting performance. In this article we examine the ways in which computer music languages might similarly influence the aesthetic decisions of the digital music practitioner, even when those languages are designed for generality and theoretically capable of implementing any sound-producing process. We examine the basis for querying the non-neutrality of tools with a particular focus on the concept of idiomaticity: patterns of instruments or languages which are particularly easy or natural to execute in comparison to others. We then present correspondence with the developers of several major music programming languages and a survey of digital musical instrument creators examining the relationship between idiomatic patterns of the language and the characteristics of the resulting instruments and pieces. In an open-ended creative domain, asserting causal relationships is difficult and potentially inappropriate, but we find a complex interplay between language, instrument, piece and performance that suggests that the creator of the music programming language should be considered one party to a creative conversation that occurs each time a new instrument is designed.},
file = {/Users/tomoya/Zotero/storage/H5BPY7NY/Document_6790458_59753.pdf;/Users/tomoya/Zotero/storage/REBKU2E4/Document_6790458_59753.pdf}
}

@article{mernik2005,
title = {When and How to Develop Domain-Specific Languages},
author = {Mernik, Marjan and Heering, Jan and Sloane, Anthony M.},
year = {2005},
journal = {ACM Computing Surveys},
volume = {37},
number = {4},
pages = {316--344},
issn = {0360-0300},
doi = {10.1145/1118890.1118892},
abstract = {Domain-specific languages (DSLs) are languages tailored to a specific application domain. They offer substantial gains in expressiveness and ease of use compared with general-purpose programming languages in their domain of application. DSL development is hard, requiring both domain knowledge and language development expertise. Few people have both. Not surprisingly, the decision to develop a DSL is often postponed indefinitely, if considered at all, and most DSLs never get beyond the application library stage. Although many articles have been written on the development of particular DSLs, there is very limited literature on DSL development methodologies and many questions remain regarding when and how to develop a DSL. To aid the DSL developer, we identify patterns in the decision, analysis, design, and implementation phases of DSL development. Our patterns improve and extend earlier work on DSL design patterns. We also discuss domain analysis tools and language development systems that may help to speed up DSL development. Finally, we present a number of open problems. {\copyright} 2005 ACM.},
keywords = {Application language,Domain analysis,Domain-specific language,Language development system},
file = {/Users/tomoya/Zotero/storage/U656SXFX/Weh and How to Develop Domain Specific languages.pdf}
}

@inproceedings{miyazaki2012,
title = {Algorhythmic {{Listening}} 1949--1962: {{Auditory Practices}} of {{Early Mainframe Computing}}},
booktitle = {{{AISB}}/{{IACAP World Congress}} 2012: {{Symposium}} on the {{History}} and {{Philosophy}} of {{Programming}}, {{Part}} of {{Alan Turing Year}} 2012},
author = {Miyazaki, Shintaro},
year = {2012},
pages = {5},
abstract = {It is still very unknown that besides the first visual interfaces to early computers, such as the Williams-Kilburn Tube operating for the first time in 1948 on the Manchester Small-Scale Experimental Machine (SSEM) or the many type-machine like printing outputs of standard calculating machines, there were as well auditory interfaces, which were build in as simple amplifier-loudspeaker set-ups in to the circuits of the early mainframe computers. Examples of such machines were the famous UNIVAC-I, the TX-0 at MIT, the CSIRAC in Australia and the Pilot ACE in England, but as well later machines such as the Pegasus produced by Ferranti Ltd. in Manchester and the PASCAL-Computer of Philips Electronics in Eindhoven, Netherlands.},
language = {en},
file = {/Users/tomoya/Zotero/storage/3VML8FBZ/Shintaro - Algorhythmic Listening 1949-1962 Auditory Practice.pdf}
}

@inproceedings{Muller2020,
title = {A {{Rhetorical Framework}} for {{Programming Language Evaluation}}},
booktitle = {Onward! 2020 - {{Proceedings}} of the 2020 {{ACM SIGPLAN International Symposium}} on {{New Ideas}}, {{New Paradigms}}, and {{Reflections}} on {{Programming}} and {{Software}}, {{Co-located}} with {{SPLASH}} 2020},
author = {Muller, Stefan K. and Ringler, Hannah},
year = {2020},
month = nov,
pages = {187--194},
publisher = {Association for Computing Machinery, Inc},
address = {New York, NY, USA},
doi = {10.1145/3426428.3426927},
urldate = {2021-03-28},
abstract = {Programming languages researchers make a variety of different kinds of claims about the design of languages and related tools and calculi. Each type of claim requires different kinds of reasons and evidence to justify. Claims regarding the aesthetics or elegance of a design, or its effects on people, are especially tricky to justify because they are less strictly defined and thus are subject to change depending on the exact audience. In this essay, we take an interdisciplinary approach to this problem by drawing on the fields of argument theory and rhetorical analysis to develop a framework for justifying audience-dependent claims. In particular, we argue that researchers should provide descriptions of specific features of their systems that connect to effects on audience in order to justify these claims. To demonstrate this framework, we show several examples of how this is already being practiced in some programming languages research, and conclude by calling for authors to provide descriptive evidence to bolster such claims and to frame and strengthen other evaluation methods such as user studies.},
isbn = {978-1-4503-8178-9},
keywords = {justifying claims,programming language evaluation,rhetoric},
file = {/Users/tomoya/Zotero/storage/4Y9W67KW/Muller, Ringler - 2020 - A rhetorical framework for programming language evaluation.pdf;/Users/tomoya/Zotero/storage/Q7MWPPNC/full-text.pdf}
}

@incollection{Nishino2016,
title = {Computer {{Music Languages}} and {{Systems}}: {{The Synergy Between Technology}} and {{Creativity}}},
booktitle = {Handbook of {{Digital Games}} and {{Entertainment Technologies}}},
author = {Nishino, Hiroki and Nakatsu, Ryohei},
year = {2016},
doi = {10.1007/978-981-4560-52-8},
isbn = {978-981-4560-52-8},
file = {/Users/tomoya/Zotero/storage/JVBK3LZK/Nishino, Nakatsu - 2016 - Computer Music Languages and Systems The Synergy Between Technology and Creativity.pdf;/Users/tomoya/Zotero/storage/UKFT5TD2/Nishino, Nakatsu_2016_Handbook of Digital Games and Entertainment Technologies.pdf}
}

@phdthesis{norilo_kronos_2016,
title = {Kronos: {{Reimagining}} Musical Signal Processing},
author = {Norilo, Vesa},
year = {2016},
school = {University of the Arts Helsinki},
file = {/Users/tomoya/Zotero/storage/DIJ6Q8UF/sisus_b51.pdf;/Users/tomoya/Zotero/storage/KLHBHLZZ/sisus_b51.pdf}
}

@article{norilo2015,
title = {Kronos: {{A Declarative Metaprogramming Language}} for {{Digital Signal Processing}}},
author = {Norilo, Vesa},
year = {2015},
journal = {Computer Music Journal},
volume = {39},
number = {4},
pages = {30--48},
doi = {10.1162/COMJ_a_00330},
abstract = {Kronos is a signal-processing programming language based on the principles of semifunctional reactive systems. It is aimed at efficient signal processing at the elementary level, and built to scale towards higher-level tasks by utilizing the powerful programming paradigms of "metaprogramming" and reactive multirate systems. The Kronos language features expressive source code as well as a streamlined, efficient runtime. The programming model presented is adaptable for both sample-stream and event processing, offering a cleanly functional programming paradigm for a wide range of musical signal-processing problems, exemplified herein by a selection and discussion of code examples.},
file = {/Users/tomoya/Zotero/storage/THAKVEM6/m-api-574ff3be-cfe2-7867-406a-df50770bf1cb.pdf}
}

@article{Nyquist1928,
title = {Certain {{Topics}} in {{Telegraph Transmission Theory}}},
author = {Nyquist, H.},
year = {1928},
month = apr,
journal = {Transactions of the American Institute of Electrical Engineers},
volume = {47},
number = {2},
pages = {617--644},
issn = {0096-3860},
doi = {10.1109/T-AIEE.1928.5055024},
urldate = {2019-01-20},
file = {/Users/tomoya/Zotero/storage/MDQ8W5KZ/nyquist1928.pdf}
}

@incollection{Orlarey2009,
title = {{{FAUST}}: An {{Efficient Functional Approach}} to {{DSP Programming}}},
booktitle = {New {{Computational Paradigms}} for {{Computer Music}}},
author = {Orlarey, Yann and Fober, Dominique and Letz, St{\'e}phane},
year = {2009},
publisher = {DELATOUR FRANCE},
urldate = {2020-03-28},
file = {/Users/tomoya/Zotero/storage/LB4PIMPY/full-text.pdf}
}

@misc{ostertag1998,
title = {Why {{Computer Music Sucks}}},
author = {Ostertag, Bob},
year = {1998},
urldate = {2025-01-17},
howpublished = {https://web.archive.org/web/20160312125123/http://bobostertag.com/writings-articles-computer-music-sucks.htm},
file = {/Users/tomoya/Zotero/storage/9QAGQSVS/writings-articles-computer-music-sucks.html}
}

@misc{puckette_47_2020,
title = {47 {$\bullet$} {{Miller Puckette}} {$\bullet$} {{Max}}/{{MSP}} \& {{Pure Data}}},
author = {Reese, Ivan},
year = {2020},
month = may,
journal = {Future of Coding},
number = {47},
urldate = {2022-01-23},
abstract = {Are you looking for the real computer revolution? Join the club! Future of Coding is a podcast and community of toolmakers, researchers, and creators working together to reimagine computing.},
collaborator = {Puckette, Miller S.},
language = {en},
file = {/Users/tomoya/Zotero/storage/E4PL98DG/047.html}
}

@inproceedings{puckette_pure_1997,
title = {Pure {{Data}}},
booktitle = {International {{Computer Music Conference Proceedings}}},
author = {Puckette, Miller},
year = {1997},
publisher = {Michigan Publishing, University of Michigan Library},
issn = {2223-3881},
file = {/Users/tomoya/Zotero/storage/E5VQAJSD/puredata_icmc97.pdf}
}

@inproceedings{puckette2015,
title = {The {{Sampling Theorem}} and {{Its Discontents}}},
author = {Puckette, Miller},
year = {2015},
booktitle = {International {{Computer Music Conference Proceedings}}},
pages = {1--14},
abstract = {The fundamental principle of Computer Music is usually taken to be the Nyquist Theorem, which, in its usually cited form, states that a band-limited function can be exactly represented by sampling it at regular intervals. This paper will not quarrel with the theorem itself, but rather will test the assump-tions under which it is commonly applied, and endeavor to show that there are interesting approaches to computer music that lie outside the framework of the sampling theorem. As we will see in Section 3, sampling violations are ubiquitous in every-day electronic music practice. The severity of these violations can usually be mitigated either through various engineering practices and/or careful critical lis-tening. But their existence gives the lie to the popular understanding of digital audio practice as being " lossless " . This is not to deny the power of modern digital signal processing theory and its applications, but rather to claim that its underlying assumption---that the sampled signals on which we are operating are to be thought of as exactly rep-resenting band-limited continuous-time functions---sheds light on certain digital operations (notably time-invariant filtering) but not so aptly on others, such as classical synthesizer waveform generation. Digital audio practitioners cannot escape the necessity of representing contin-uous-time signals with finite-sized data structures. But the blanket assumption that such signals can only be represented via the sampling theorem can be unnecessarily limiting. In Sections 4 and 6 I'll describe investigations by two recent UCSD graduates that each adopt a distinct approach to audio manipu-lation outside the framework of the sampling theorem. A collection of accompanying patches that demonstrate some of these ideas can be downloaded from msp.ucsd.edu/ideas/icmc15-examples/. 1 1 The assumptions Band-limited functions are a vector space: you can scale one of them, or add two of them, to get another. 
But that is where closure ends. The trouble begins as soon as we even go so far as to multiply one signal by another. Suppose two sampled signals, X[n] and Y [n], are used to represent two continuous functions of time x(t), y(t), which we assume to be band-limited, containing only frequen-cies in the Nyquist frequency band, the interval (-R/2, R/2) where R is the sample rate. The values can either be real or complex, and for simplicity we'll assume the computer can exactly represent the numerical values. (It isn't true but that is usually a comparatively minor issue). There is, of course, a perfectly good continuous-time signal, call it z(t), that is represented by the computable product, Z[n] = X[n]Y [n]. But it's not in general the case that z(t) = x(t)y(t). We didn't in reality make the product of the two continuous-time signals we were representing when we multiplied their computer representations. At this point we can look ruefully back at every occurrence of the character " * " in all the Csound, Pd, SuperCollider, Kyma, 4X, or MUSIC 10 instruments we've ever built and reflect on the fact that the result isn't really correct, if we regard our sampled signals as representing continuous-time ones. Often it's a very serviceable approximation. If, for instance, the signals x(t) and y(t) have frequency limits whose sum is less than R/2, the multiplication is exact; and when not exact, it is often a very good approximation. But the approximation's accuracy or lack thereof is rarely worked out explicitly. We could always take action to band-limit two signals (by filtering them) before multiplying so that the multiplication itself doesn't yield frequencies out-side the Nyquist frequency band. But this would cause delays and/or phase distortion, not to mention the computational cost this would incur. 
One fundamental operation in electronic music practice (in my thinking, the most fundamental one) is table lookup, which is used in digital oscillators and samplers, and also in nonlinear techniques such as FM and waveshaping. Again sidestepping the comparatively minor issue of the accuracy limits of wavetable lookup, we instead again consider the possibility of frequency products landing outside the Nyquist band. Suppose the incoming signal is a sinusoid of frequency \${\o}mega\$ and that the wavetable lookup can be approximated as a power series, f (x) = a 0 + a 1 x + a 2 x 2 + {$\cdot$} {$\cdot$} {$\cdot$}},
file = {/Users/tomoya/Zotero/storage/MID3RJBP/Puckette - 2015 - The Sampling Theorem and Its Discontents.pdf;/Users/tomoya/Zotero/storage/PSEY42RW/m-api-72fdc1c2-f979-d1a3-01e3-3e0da12b5bbf.pdf}
}

@book{roads2023,
  title = {The Computer Music Tutorial},
  author = {Roads, Curtis},
  year = {2023},
  month = jun,
  edition = {Second},
  publisher = {The MIT Press},
  address = {Cambridge, Massachusetts},
  abstract = {Expanded, updated, and fully revised---the definitive introduction to electronic music is ready for new generations of students.Essential and state-of-the-art, The Computer Music Tutorial, second edition is a singular text that introduces computer and electronic music, explains its motivations, and puts topics into context. Curtis Roads's step-by-step presentation orients musicians, engineers, scientists, and anyone else new to computer and electronic music.The new edition continues to be the definitive tutorial on all aspects of computer music, including digital audio, signal processing, musical input devices, performance software, editing systems, algorithmic composition, MIDI, and psychoacoustics, but the second edition also reflects the enormous growth of the field since the book's original publication in 1996. New chapters cover up-to-date topics like virtual analog, pulsar synthesis, concatenative synthesis, spectrum analysis by atomic decomposition, Open Sound Control, spectrum editors, and instrument and patch editors. Exhaustively referenced and cross-referenced, the second edition adds hundreds of new figures and references to the original charts, diagrams, screen images, and photographs in order to explain basic concepts and terms.FeaturesNew chapters: virtual analog, pulsar synthesis, concatenative synthesis, spectrum analysis by atomic decomposition, Open Sound Control, spectrum editors, instrument and patch editors, and an appendix on machine learningTwo thousand references support the book's descriptions and point readers to further studyMathematical notation and program code examples used only when necessaryTwenty-five years of classroom, seminar, and workshop use inform the pace and level of the material},
  isbn = {978-0-262-04491-2},
  language = {english}
}
|
||
|
||
@misc{ruiz_vult_2020,
  title = {Vult {{Language}}},
  author = {Ruiz, Leonardo Laguna},
  year = {2020},
  urldate = {2020-09-27},
  howpublished = {http://modlfo.github.io/vult/},
  internal-note = {Duplicate of entry Ruiz2020 (same work, different urldate); consider merging.}
}
|
||
|
||
@misc{Ruiz2020,
  title = {Vult {{Language}}},
  author = {Ruiz, Leonardo Laguna},
  year = {2020},
  urldate = {2024-11-27},
  howpublished = {http://modlfo.github.io/vult/}
}
|
||
|
||
@inproceedings{Salazar2012,
  title = {Chugens, Chubgraphs, Chugins: 3 Tiers for Extending {ChucK}},
  booktitle = {International {{Computer Music Conference Proceedings}}},
  author = {Salazar, Spencer and Wang, Ge},
  year = {2012},
  pages = {60--63},
  file = {/Users/tomoya/Zotero/storage/6XY3DR2B/chugens-chubgraphs-chugins-3-tiers-for-extending-chuck.pdf}
}
|
||
|
||
@article{scheirer1999,
  title = {{{SAOL}}: {{The MPEG-4 Structured Audio Orchestra Language}}},
  shorttitle = {{{SAOL}}},
  author = {Scheirer, Eric D. and Vercoe, Barry L.},
  year = {1999},
  month = jun,
  journal = {Computer Music Journal},
  volume = {23},
  number = {2},
  pages = {31--51},
  issn = {0148-9267, 1531-5169},
  doi = {10.1162/014892699559742},
  urldate = {2022-01-06},
  language = {en},
  file = {/Users/tomoya/Zotero/storage/NIULED49/Scheirer and Vercoe - 1999 - SAOL The MPEG-4 Structured Audio Orchestra Langua.pdf;/Users/tomoya/Zotero/storage/U9MFTBDB/Scheirer and Vercoe - 1999 - SAOL The MPEG-4 Structured Audio Orchestra Langua.pdf}
}
|
||
|
||
@phdthesis{sorensen_extempore:_2018,
  title = {Extempore: {{The}} Design, Implementation and Application of a Cyber-Physical Programming Language},
  author = {Sorensen, Andrew Carl},
  year = {2018},
  doi = {10.25911/5D67B75C3AAF0},
  school = {The Australian National University},
  keywords = {Computer Music,Cyber,Extempore,High Performance Computing,Human Computer Interaction,Live Coding,Live Programming,Physical Programming},
  file = {/Users/tomoya/Zotero/storage/5HUUW8EZ/full-text.pdf;/Users/tomoya/Zotero/storage/B2JYT8R8/Sorensen - 2018 - Extempore The design, implementation and application of a cyber-physical programming language(3).pdf}
}
|
||
|
||
@article{Spinellis2001,
  title = {Notable Design Patterns for Domain-Specific Languages},
  author = {Spinellis, Diomidis},
  year = {2001},
  month = feb,
  journal = {Journal of Systems and Software},
  volume = {56},
  number = {1},
  pages = {91--99},
  publisher = {Elsevier Inc.},
  issn = {0164-1212},
  doi = {10.1016/S0164-1212(00)00089-3},
  urldate = {2021-05-01},
  abstract = {The realisation of domain-specific languages (DSLS) differs in fundamental ways from that of traditional programming languages. We describe eight recurring patterns that we have identified as being used for DSL design and implementation. Existing languages can be extended, restricted, partially used, or become hosts for DSLS. Simple DSLS can be implemented by lexical processing. In addition, DSLS can be used to create front-ends to existing systems or to express complicated data structures. Finally, DSLS can be combined using process pipelines. The patterns described form a pattern language that can be used as a building block for a systematic view of the software development process involving DSLS. {\copyright} 2001 Elsevier Science Inc. All rights reserved.},
  keywords = {Design patterns,Domain-specific languages},
  file = {/Users/tomoya/Zotero/storage/RKYRSCKJ/dslpat.pdf;/Users/tomoya/Zotero/storage/SVXY7CTA/dslpat.pdf}
}
|
||
|
||
@book{sterne_audible_2003,
  title = {The Audible Past: Cultural Origins of Sound Reproduction},
  shorttitle = {The Audible Past},
  author = {Sterne, Jonathan},
  year = {2003},
  publisher = {Duke University Press},
  address = {Durham},
  isbn = {978-0-8223-3013-4 978-0-8223-3004-2},
  lccn = {TK7881.4 .S733 2003},
  keywords = {Popular culture,Recording and reproducing History,Social aspects,Sound,Sound in mass media,Sound recording industry,Sound recordings}
}
|
||
|
||
@book{sterne_diminished_2022,
  title = {Diminished Faculties: A Political Phenomenology of Impairment},
  shorttitle = {Diminished Faculties},
  author = {Sterne, Jonathan},
  year = {2022},
  month = jan,
  publisher = {Duke University Press},
  address = {Durham},
  abstract = {In Diminished Faculties Jonathan Sterne offers a sweeping cultural study and theorization of impairment. Drawing on his personal history with thyroid cancer and a paralyzed vocal cord, Sterne undertakes a political phenomenology of impairment in which experience is understood from the standpoint of a subject that is not fully able to account for itself. He conceives of impairment as a fundamental dimension of human experience, examining it as both political and physical. While some impairments are enshrined as normal in international standards, others are treated as causes or effects of illness or disability. Alongside his fractured account of experience, Sterne provides a tour of alternative vocal technologies and practices; a study of ``normal'' hearing loss as a cultural practice rather than a medical problem; and an intertwined history and phenomenology of fatigue that follows the concept as it careens from people to materials science to industrial management to spoons. Sterne demonstrates how impairment is a problem, opportunity, and occasion for approaching larger questions about disability, subjectivity, power, technology, and experience in new ways. Diminished Faculties ends with a practical user's guide to impairment theory.},
  isbn = {978-1-4780-1508-6},
  language = {english}
}
|
||
|
||
@article{sterne_there_2014,
  title = {There {{Is No Music Industry}}},
  author = {Sterne, Jonathan},
  year = {2014},
  month = jan,
  journal = {Media Industries Journal},
  volume = {1},
  number = {1},
  pages = {50--55},
  publisher = {University of Michigan Library},
  doi = {10.3998/mij.15031809.0001.110},
  urldate = {2021-07-22},
  abstract = {The locution ``music industry'' still too often refers to a single subset of profit-making practices in music: record labels and the activities around them. Media scholars are partly to blame, as they continue to define record labels, and especially labels that are part of conglomerates, in this way. Yet this notion of the production and sale of recordings as the basis of "the music industry" is hardly a part that represents the whole. Drawing on the work of Christopher Small and others who have decentered the musical text as the basis of music criticism, I argue that media industries scholars must do the same, opening up our inquiries to a wide range of music industries ; that is, industries whose activities directly affect the performance, production, circulation, consumption, recirculation, appropriation, and enjoyment of music today. Opening the term up in this way will allow us to develop more robust and coherent social accounts of music as a media practice, and provide a stronger empirical basis for criticizing current institutional arrangements and proposing new, more just and convivial alternatives.},
  file = {/Users/tomoya/Downloads/full-text (日本語).pdf;/Users/tomoya/Zotero/storage/BAS5KW6K/full-text.pdf}
}
|
||
|
||
@article{sterne2006a,
  title = {The Death and Life of Digital Audio},
  author = {Sterne, Jonathan},
  year = {2006},
  month = dec,
  journal = {Interdisciplinary Science Reviews},
  volume = {31},
  number = {4},
  pages = {338--348},
  issn = {0308-0188, 1743-2790},
  doi = {10.1179/030801806X143277},
  urldate = {2025-01-20},
  language = {en},
  file = {/Users/tomoya/Zotero/storage/XDM6F967/Sterne - 2006 - The death and life of digital audio.pdf}
}
|
||
|
||
@article{sterne2008,
  title = {Being '{{In}} the {{True}}' of {{Sound Studies}}},
  author = {Sterne, Jonathan},
  year = {2008},
  month = dec,
  journal = {Music, Sound, and the Moving Image},
  volume = {2},
  number = {2},
  pages = {163--167},
  publisher = {Liverpool University Press},
  doi = {10.3828/msmi.2.2.11},
  urldate = {2025-01-20},
  file = {/Users/tomoya/Zotero/storage/QJTUHNJN/Sterne - 2008 - Being 'In the True' of Sound Studies.pdf}
}
|
||
|
||
@book{tanaka_all_2017,
  title = {All {{About Chiptune}}: {{New Music Born}} from {{Games}}},
  author = {Tanaka, Haruhisa (hally)},
  year = {2017},
  publisher = {Seibundo Shinkosha},
  isbn = {978-4-416-61621-5},
  annotation = {title translation by the author.}
}
|
||
|
||
@misc{taylor_article_1999,
  title = {An {Interview} with {David Wessel}},
  author = {Taylor, Gregory},
  year = {1999},
  urldate = {2022-01-20},
  abstract = {David Wessel is Professor of Music at the University of California, Berkeley where he directs the Center for New Music and Audio Technologies (CNMAT). Wessel worked at IRCAM between 1979 and 1988; his activities there included starting the department where Miller Puckette first began working on Max on a Macintosh. Since Wessel's arrival in Berkeley over ten years ago, CNMAT has been actively involved in teaching Max/MSP as well as developing freely available Max-based software projects.},
  howpublished = {https://cycling74.com/articles/an-interview-with-david-wessel},
  language = {en},
  file = {/Users/tomoya/Zotero/storage/ZM7E9L9Q/an-interview-with-david-wessel.html}
}
|
||
|
||
@book{theberge_any_1997,
  title = {Any Sound You Can Imagine: Making Music/Consuming Technology},
  shorttitle = {Any Sound You Can Imagine},
  author = {Th{\'e}berge, Paul},
  year = {1997},
  series = {Music/Culture},
  publisher = {Wesleyan University Press : University Press of New England},
  address = {Hanover, NH},
  isbn = {978-0-8195-5307-2 978-0-8195-6309-5},
  lccn = {ML1092 .T38 1997},
  keywords = {Computer sound processing,Electronic musical instruments,Music and technology}
}
|
||
|
||
@article{theberge_any_2023,
  title = {Any {{Sound You Can Imagine}}: {{Then}} and Now},
  shorttitle = {Any {{Sound You Can Imagine}}},
  author = {Th{\'e}berge, Paul},
  year = {2023},
  month = jun,
  journal = {Journal of Popular Music Education},
  volume = {7},
  issuetitle = {The 25th Anniversary Release of Th{\'e}berge's Any Sound You Can Imagine: Making Music/Consuming Technology},
  pages = {219--229},
  publisher = {Intellect},
  issn = {2397-6721, 2397-673X},
  doi = {10.1386/jpme_00115_1},
  urldate = {2025-01-22},
  abstract = {During the 25 years since the publication of my book, Any Sound You Can Imagine: Making Music/Consuming Technology, a number of technological developments and theoretical trends have emerged: among them, the integration of music production within Digital Audio Workstation (DAW) platforms, and the rise of social media as a means for information sharing among musicians, on the one hand; and the emergence, in popular music studies, of practice-based and community-oriented forms of music research and pedagogy, on the other. In addition, new technologies and applications of artificial intelligence (AI) have begun to have an impact on music-making and listening at every level. These developments are discussed in relation to theoretical issues of innovation, production, consumption and gender found in my previous work and, more specifically, in relation to concerns raised in a number of articles in the present volume, using them as a springboard for further reflection and theorizing.},
  language = {en},
  file = {/Users/tomoya/Zotero/storage/4FJLP4DZ/Théberge - 2023 - Any Sound You Can Imagine Then and now.pdf;/Users/tomoya/Zotero/storage/EHEXPCGE/jpme_00115_1.html}
}
|
||
|
||
@misc{toplap_manifestodraft_2004,
  title = {{{ManifestoDraft}} - {{Toplap}}},
  author = {{TOPLAP}},
  year = {2004},
  urldate = {2025-01-26},
  howpublished = {https://toplap.org/wiki/ManifestoDraft}
}
|
||
|
||
@article{vercoe_computer_1983,
  title = {Computer {{Systems}} and {{Languages}} for {{Audio Research}}},
  author = {Vercoe, Barry L.},
  year = {1983},
  journal = {The New World of Digital Audio (Audio Engineering Society Special Edition)},
  pages = {245--250},
  file = {/Users/tomoya/Zotero/storage/5FWAAURE/Vercoe - Computer Systems and Languages for Audio Research.pdf}
}
|
||
|
||
@inproceedings{wakefield2010,
  title = {{{LuaAV}}: {{Extensibility}} and {{Heterogeneity}} for {{Audiovisual Computing}}},
  booktitle = {Proceeding of {{Linux Audio Conference}}},
  author = {Wakefield, Graham and Smith, Wesley and Roberts, Charles},
  year = {2010},
  urldate = {2020-06-07},
  abstract = {We describe LuaAV, a runtime library and application which extends the Lua programming language to support computational composition of temporal, sound, visual, spatial and other elements. In this paper we document how we have attempted to maintain several core principles of Lua itself-extensibility, meta-mechanisms, efficiency, portabil-ity-while providing the flexibility and temporal accuracy demanded by interactive audiovisual media. Code generation is noted as a recurrent strategy for increasingly dynamic and extensible environments. LuaAV is an integrated programming environment based upon extensions to the Lua programming language enabling the tight real-time integration of computation, time, sound and space. LuaAV has grown from the needs of students and researchers in the Media Arts \& Technology program at the University of California Santa Barbara; its origins lie in earlier Lua-based audio and visual tools [20] [15] [22]. More recently it has formed a central component of media software infrastructure for the AlloSphere [1] research space (a 3-storey immersive spherical cave-like environment with stereographic projection and spatial audio). Various projects built using LuaAV have been performed, exhibited or installed internationally , for scientific visualization [1], data visualization [13], immersive generative art [21], game development 1 , live-coding (Figure 1) and audiovisual performance 2. LuaAV is available under a UC Regents license similar in nature to the BSD license 3 .},
  keywords = {Audio-visual,composition,Lua,scripting language},
  file = {/Users/tomoya/Zotero/storage/C8WADNNI/full-text.pdf}
}
|
||
|
||
@article{wang_chuck_2015,
  title = {{{ChucK}}: {{A Strongly Timed Computer Music Language}}},
  author = {Wang, Ge and Cook, Perry R. and Salazar, Spencer},
  year = {2015},
  journal = {Computer Music Journal},
  volume = {39},
  number = {4},
  pages = {10--29},
  doi = {10.1162/COMJ_a_00324},
  abstract = {ChucK is a programming language designed for computer music. It aims to be expressive and straightforward to read and write with respect to time and concurrency, and to provide a platform for precise audio synthesis and analysis and for rapid experimentation in computer music. In particular, ChucK defines the notion of a strongly timed audio programming language, comprising a versatile time-based programming model that allows programmers to flexibly and precisely control the flow of time in code and use the keyword now as a time-aware control construct, and gives programmers the ability to use the timing mechanism to realize sample-accurate concurrent programming. Several case studies are presented that illustrate the workings, properties, and personality of the language. We also discuss applications of ChucK in laptop orchestras, computer music pedagogy, and mobile music instruments. Properties and affordances of the language and its future directions are outlined.},
  file = {/Users/tomoya/Zotero/storage/4BFQ6VDF/Wang, Cook, Salazar - 2015 - ChucK A Strongly Timed Computer Music Language.pdf}
}
|
||
|
||
@incollection{wang2017,
  title = {A {{History}} of {{Programming}} and {{Music}}},
  booktitle = {Cambridge {{Companion}} to {{Electronic Music}}},
  author = {Wang, Ge},
  year = {2017},
  edition = {2},
  pages = {58--85},
  publisher = {Cambridge University Press},
  doi = {10.1017/9781316459874.006},
  isbn = {978-1-107-59002-1},
  language = {en},
  file = {/Users/tomoya/Zotero/storage/B4EJW298/Wang - A History of Programming and Music.pdf}
}
|
||
|
||
@techreport{woltman1990,
  title = {{{UNIVAC Conference}}},
  author = {Woltman, Richard D. and Woltman, Frances B. and Wilson, Louis D. and Tonik, Albert B. and Swearingen, John K. and Shuler, Cecil M. and Sberro, Joseph E. and Sammet, Jean E. and Matter, H. W. and Marquardt, Donald W. and Koons, Florence K. and Huff, Morgan W. and Holberton, Frances E. and Hammer, Carl and Dixon, Donald B. and Delves, Eugene L. and Danehower, George and Chinitz, M. Paul and Carter, Lee S. and Bartik, Jean and Armstrong, Lancelot W. and Armstrong, Dorothy P. and Adams, Armand E.},
  year = {1990},
  institution = {Charles Babbage Institute},
  file = {/Users/tomoya/Zotero/storage/QCBXZMFI/Woltman, Richard D. et al. - 1990 - UNIVAC Conference..pdf}
}
|