Compare commits: 7ffb1ad3b9...v4 (251 commits)
content/.obsidian/community-plugins.json (vendored)
@@ -2,9 +2,13 @@
   "obsidian-pandoc-reference-list",
   "templater-obsidian",
   "better-word-count",
   "obsidian-local-rest-api",
   "obsidian-excalidraw-plugin",
   "obsidian-git",
   "obsidian-auto-link-title",
-  "2hop-links-plus"
+  "2hop-links-plus",
+  "obsidian-focus-mode",
+  "obsidian-zotero-desktop-connector",
+  "obsidian-outliner",
+  "obsidian-local-rest-api",
+  "mcp-tools"
 ]
content/.obsidian/core-plugins.json (vendored)
@@ -26,5 +26,6 @@
   "file-recovery": true,
   "publish": false,
   "sync": false,
-  "properties": true
+  "properties": true,
+  "webviewer": false
 }
content/.obsidian/graph.json (vendored)
@@ -60,6 +60,6 @@
   "repelStrength": 15.1642583672965,
   "linkStrength": 0.975453871804372,
   "linkDistance": 42,
-  "scale": 0.6866831001110424,
+  "scale": 0.7322130672951164,
   "close": true
 }
@@ -1,7 +1,7 @@
 {
   "id": "obsidian-zotero-desktop-connector",
   "name": "Zotero Integration",
-  "version": "3.1.8",
+  "version": "3.2.1",
   "minAppVersion": "1.1.1",
   "description": "Insert and import citations, bibliographies, notes, and PDF annotations from Zotero.",
   "author": "mgmeyers",
content/1,540,000nm of DSSC.md (new file)
@@ -0,0 +1,18 @@
---
date: 2025-02-18 19:16
---
#stub

A project by [[Shih Wei-Chien]].

[1,540,000nm of DSSC — Shih Wei Chieh](https://shihweichieh.com/1-540-000nm-of-DSSC)

FTO glass is used.

Uses TiO2.

Looks like the coating is applied with a doctor blade.

Transparent conductive electrode PT-1: platinum?
@@ -1,7 +1,7 @@
 ---
 date: "2024-02-06T08:42:13+0900"
 ---
-#queer #history #research
+#queer #history #research #queercomputing

 A serialized column by [[Jacob Gaboury]] on Rhizome.org (2013).
@@ -45,7 +45,7 @@ date: "2024-02-05T12:49:01+0900"
 If you don't take to this one either, the early part will probably be the roughest, but as everyone says, at least read through volume 3. It also builds on the 2000s-2010s shonen harem rom-com, and what's remarkable is that it refuses to let the genre's scattered contrivances resolve conveniently. On top of that, since its subject is cosplay, i.e. the 2.5-dimensional, it ends up depicting at a meta level how romance inside fiction and romance in reality intersect. If you're interested in ACE, push on to chapter 140 (volume 18). It should also be readable from fictoromantic and polyamorous angles. It's still unfinished, so I'm looking forward to how the story gets resolved, too.

-## Films
+## Films & TV dramas

 ### そばかす
content/AT&Tのアーカイブ.md (new file)
@@ -0,0 +1,8 @@
---
date: 2025-05-13 14:30
---

[Sound and Science | The AT&T Archives and History Center](https://soundandscience.net/contributor-essays/the-att-archives-and-history-center/)

Due to budget cuts it is now managed single-handedly by an archivist named Sheldon Hochheiser, and apparently they no longer take inquiries by email.
@@ -6,3 +6,9 @@ date: "2024-01-05T17:15:38+0900"
 [[Paul Theberge]]

+[[越領域的イノベーション]]
+
+> Lacking adequate knowledge of the technical system, musicians increasingly found themselves drawn to prefabricated programs as a source of new sound material. As I have argued, however, this assertion is not simply a statement of fact; it also suggests a reconceptualization on the part of the industry of the musician as a particular type of consumer. (p89)
+
+A 2023 paper looks back on the book's aftermath (with mentions of [[Jonathan Sterne]]'s AI-mastering work, among other things):
+
+[Any Sound You Can Imagine: Then and now | Intellect](https://intellectdiscover.com/content/journals/10.1386/jpme_00115_1)
content/Appropriate Technology.md (new file)
@@ -0,0 +1,9 @@
---
date: 2025-05-12 17:32
---

A subreddit category.

[r/AppropriateTechnology / Reddit - The heart of the internet](https://www.reddit.com/r/AppropriateTechnology/)

Appropriate & Resilient Technologies
content/Arduino Uno R4.md (new file)
@@ -0,0 +1,67 @@
---
date: 2025-05-22 13:58
---
#arduino #electronics

[Arduino UNO R4 — Arduino Official Store](https://store.arduino.cc/pages/uno-r4)

The chip changed from AVR to Renesas.

## Nice points

- Like the Leonardo and Pro Micro, it can emulate HID devices (you can build your own USB device that acts as a mouse or keyboard).
- It carries two analog op-amps.
- The ADC can be used at up to 14-bit resolution.
- Besides PWM, two channels of 12-bit DAC are available.
- The supply input went from 12 V max to 24 V max, and a DC-DC converter replaces the linear regulator, so heat is no longer a worry.
- It's USB-C, thankfully.

## Pain points

Things learned from using it in class:

### The pin current limits are tight

[Arduino UNO R4 Minima allows 8 mA per I/O pin, 60 mA total across all outputs #テスト自動化 - Qiita](https://qiita.com/pbjpkas/items/859b273961d5d272b714)
Wiring an LED straight to a pin without thinking can already be an overcurrent.

### It fails in odd ways under overcurrent

Related to the above: if you accidentally short an output pin or apply overvoltage, as a rule not just that pin but the whole chip dies.

Patterns seen so far:

- The chip overheats the moment power is applied, and the board is not even recognized as a device
- The TX and RX LEDs stay lit no matter what sketch is uploaded

What makes this painful is how hard it is to tell whether a board is broken or not.

### Compile times are long

Maybe it's the toolchain, but it takes easily 4x as long as the R3, similar to compile times for the ESP32 family. In a classroom this is quietly a matter of life and death.

### ADCTouch can't be used

On the AVR chips there was a clever library, [[ADCTouch]], that internally switches an ADC pin between input and output modes at high speed, so that just sticking a wire into an ADC pin gave you a touch sensor. It can't be used here.

The Renesas chip does expose pins internally for capacitive touch sensing, and by a strange trick they happen to be routed to the ❤ of "Made with ❤" on the back of the board, so you can extend that pad and get a touch sensor. I wish they had simply left a pin or a solderable land on the front.

[How to access the Capacitive Touch Sensing Unit - UNO R4 WiFi - Arduino Forum](https://forum.arduino.cc/t/how-to-access-the-capacitive-touch-sensing-unit/1145940)

Though searching now, something new has appeared:

[GitHub - delta-G/R4\_Touch: Capacitive Touch Sensing for the Arduino UNO-R4](https://github.com/delta-G/R4_Touch)

→ Tried it; it still needs a fair amount of manual calibration. Not practical yet.

### Firmata doesn't work as-is

It only worked with Configurable Firmata plus a dedicated config.

After installing Configurable Firmata, replace `~Documents/Arduino/libraries/ConfigurableFirmata/src/utility/Boards.h` with the file in the Gist below.

[Code for running Configurable Firmata on the Arduino Uno R4 Minima (fixes IS\_PIN\_PWM on line 817); ENABLE\_SERVO must be undef'd or it won't run · GitHub](https://gist.github.com/tomoyanonymous/9efdd3e6063b9eeb0fb7527137a55c93)

Open the example sketch ConfigurableFirmata → ConfigurableFirmata.

Comment out `ENABLE_SERVO` on line 22 and upload. That gets it working.
content/Barry Vercoe.md (new file)
@@ -0,0 +1,10 @@
---
date: 2025-01-27 12:44
---
#person

Developer of [[MUSIC N|MUSIC 11]] and [[Csound]].

He founded the MIT EMS (Electronic Music Studio) and was a founding faculty member of the [[MIT Media Lab]].

https://web.media.mit.edu/~bv/cv.html
content/CIRMMT Distinguished Speaker Series.md (new file)
@@ -0,0 +1,6 @@
---
date: 2025-06-08 11:02
---
#scrap

[CIRMMT Distinguished Speaker Series Visualization](https://idmil.github.io/CIRMMT_visualizations)
@@ -23,4 +23,4 @@ shred: something like a lightweight thread scheduled in logical time
 It can also load Chugins, native extensions written in C++; many integrations with other environments such as [[Faust]], [[TouchDesigner]], and [[Unity]] have been realized, and AI-related plugins have been developed as well.

-There is also Chugen, a feature for defining your own [[Unit Generator|UGen]] within the ChucK language (the counterpart of a User-Defined Opcode in [[CSound]]).
+There is also Chugen, a feature for defining your own [[Unit Generator|UGen]] within the ChucK language (the counterpart of a User-Defined Opcode in [[Csound]]).
@@ -0,0 +1,61 @@
---
cssclass: research-note
type: "bookSection"
author: "Nishino, Hiroki; Nakatsu, Ryohei"
title: "Computer Music Languages and Systems: The Synergy Between Technology and Creativity"
date: 2016-01-01
citekey: Nishino2016
tags:
  - research
  - bookSection
---
[[Nishino Hiroki]]

> [!Cite]
> Nishino, Hiroki, and Ryohei Nakatsu. 2016. "Computer Music Languages and Systems: The Synergy Between Technology and Creativity". _Handbook of Digital Games and Entertainment Technologies_. [https://doi.org/10.1007/978-981-4560-52-8](https://doi.org/10.1007/978-981-4560-52-8).

>[!Synth]
>**Contribution**::
>
>**Related**::
>

[online](http://zotero.org/users/12014264/items/3R55NRDX) [local](zotero://select/library/items/3R55NRDX) [pdf](file:///Users/tomoya/Zotero/storage/JVBK3LZK/Nishino,%20Nakatsu%20-%202016%20-%20Computer%20Music%20Languages%20and%20Systems%20The%20Synergy%20Between%20Technology%20and%20Creativity.pdf)
[pdf](file:///Users/tomoya/Zotero/storage/UKFT5TD2/Nishino,%20Nakatsu_2016_Handbook%20of%20Digital%20Games%20and%20Entertainment%20Technologies.pdf)

>[!md]
> **FirstAuthor**:: Nishino, Hiroki
> **Author**:: Nakatsu, Ryohei
> **Title**:: Computer Music Languages and Systems: The Synergy Between Technology and Creativity
> **Year**:: 2016
> **Citekey**:: Nishino2016
> **itemType**:: bookSection
> **Book**::
> **ISBN**:: 978-981-4560-52-8

> [!LINK]
>
> [Attachment](file:///Users/tomoya/Zotero/storage/JVBK3LZK/Nishino,%20Nakatsu%20-%202016%20-%20Computer%20Music%20Languages%20and%20Systems%20The%20Synergy%20Between%20Technology%20and%20Creativity.pdf)
> [PDF](file:///Users/tomoya/Zotero/storage/UKFT5TD2/Nishino,%20Nakatsu_2016_Handbook%20of%20Digital%20Games%20and%20Entertainment%20Technologies.pdf).
> [!Abstract]
>
# Notes

![[The Computer Music Tutorial, second edition - Curtis Roads#Notes]]

# Annotations%% begin annotations %%

%% end annotations %%

%% Import Date: 2025-01-20T16:45:55.224+09:00 %%
@@ -0,0 +1,9 @@
---
date: 2025-02-18 20:20
---
#openscience #linux #solarpunk #stub

[Computer and Solar Training Center of Linux Friends (ALF)](https://sokolo.cronopios.org/index.php)
@@ -0,0 +1,66 @@
---
cssclasses:
  - research-note
type: conferencePaper
author: Holbrook, Ulf; Rudi, Joran
editor: Torre, Giuseppe
title: "Computer music and post-acousmatic practices: International Computer Music Conference 2022"
date: 2022-07-03
citekey: holbrook2022
tags:
  - research
  - conferencePaper
  - "#computermusic"
---

> [!Cite]
> Holbrook, Ulf, and Joran Rudi. 2022. "Computer music and post-acousmatic practices: International Computer Music Conference 2022". _Proceedings of the International Computer Music Conference, ICMC 2022_, edited by Giuseppe Torre, 140–44. International Computer Music Conference, ICMC Proceedings. San Francisco: International Computer Music Association. [https://icmc2022.files.wordpress.com/2022/09/icmc2022-proceedings.pdf](https://icmc2022.files.wordpress.com/2022/09/icmc2022-proceedings.pdf).

>[!Synth]
>**Contribution**::
>
>**Related**::
>

[online](http://zotero.org/users/12014264/items/2QDBGAH5) [local](zotero://select/library/items/2QDBGAH5) [pdf](file:///Users/tomoya/Zotero/storage/NBRFF5ND/Holbrook%20et%20al.%20-%20Computer%20music%20and%20post-acousmatic%20practices.pdf)

>[!md]
> **FirstAuthor**:: Holbrook, Ulf
> **Author**:: Rudi, Joran
> **FirstEditor**:: Torre, Giuseppe
> **Title**:: Computer music and post-acousmatic practices: International Computer Music Conference 2022
> **Year**:: 2022
> **Citekey**:: holbrook2022
> **itemType**:: conferencePaper
> **Publisher**:: International Computer Music Association
> **Pages**:: 140-144

> [!LINK]
>
> [Holbrook et al. - Computer music and post-acousmatic practices.pdf](file:///Users/tomoya/Zotero/storage/NBRFF5ND/Holbrook%20et%20al.%20-%20Computer%20music%20and%20post-acousmatic%20practices.pdf).

> [!Abstract]
>
> This short paper considers the practices of computer music through a perspective of the post-acousmatic. As the majority of music is now made using computers, the question emerges: How relevant are the topics, methods, and conventions from the "historical" genre of computer music? Originally an academic genre confined to large mainframes, computer music's tools and conventions have proliferated and spread to all areas of music-making. As a genre steeped in technological traditions, computer music is often primarily concerned with the technologies of its own making, and in this sense isolated from the social conditions of musical practice. The post-acousmatic is offered as a methodological perspective to understand technology-based music, its histories, and entanglements.
>

# Notes

A paper examining how the term "computer music" ties in with [[ポストアクースマティック]].

> From its inception and up until today, computer music composers have sought and gained new tools, and have shifted their methods towards uses of high-level software on computers and portable tablets. Most newer tools abstract the signal processing routines and variables, making them easier to use while removing the need for understanding the underlying processes in order to create meaningful results.

Isn't this also rather disingenuous?

# Annotations%% begin annotations %%

%% end annotations %%

%% Import Date: 2025-01-21T19:08:40.356+09:00 %%
content/Curtis Roads.md (new file)
@@ -0,0 +1,5 @@
---
date: 2025-01-20 16:46
---
#stub #person
content/DIYブレスセンサー.md (new file)
@@ -0,0 +1,24 @@
#electronics

[Barometric pressure sensor module, digital, liquid-level controller board, for Arduino 3.3V-5V, 0-40kPa - AliExpress 502](https://www.aliexpress.com/item/1005007954669668.html)

[Interfacing HX710 Differential ADC with Arduino](https://swharden.com/blog/2022-11-14-hx710b-arduino/)

Cheap, but the output is digital, huh.

---

This one ends up costing about 1,500 yen, but it has analog output and is probably the stable choice:

[Pressure sensor MIS-2500-015G (5V): Akizuki Denshi](https://akizukidenshi.com/catalog/g/g107272/)

Bought it and tried it. The response is good, but the reading only seems to span a range of roughly 200-400 (blowing as hard as I can with the outlet fully blocked barely reaches 1000). The one grade down, the 006G, might be a better fit?

[Egbo gas pressure sensor transmitter module, electronic sphygmomanometer, barometer, 0-40kPa hose, 3.3V, 5V, XGZP6847A, 1-5 pcs - AliExpress 502](https://www.aliexpress.com/item/1005006297405668.html)

This one seems to be roughly the 006G equivalent.
@@ -20,7 +20,7 @@ date: "2023-10-12T18:33:49+0900"
 Raised to 500°C (13 min), heater off

-The quantity was clearly far too much, so I'll try following [[酸化亜鉛系トランジスタ#Fabrication of Flexible Metal Oxide Thin Film Transistor by Indigenously Developed Spray Pyrolysis Unit]]
+The quantity was clearly far too much, so I'll try following [[DIY酸化亜鉛系トランジスタ#Fabrication of Flexible Metal Oxide Thin Film Transistor by Indigenously Developed Spray Pyrolysis Unit]]

 - Zn Acetate Dihydrate 0.3 mol/L
 - Methanol and DI water, 2:1 mixed solvent
content/DIY半導体-実験ノート4.md (new file)
@@ -0,0 +1,26 @@
---
date: 2025-02-07 15:08
---
#research #memo

2025/02/07

The solution made previously:
- 0.3 g of 北国ノール
- 0.6585 g of ZnAcDh (0.3 mol/L, further diluted 1/50)
- DI water up to 10 ml total

Mix in citric acid as a 0.3 mol/L chelating agent.
Molar mass of citric acid: 192.124 g/mol
`192.124*0.3/100 = 0.576372 (g)`
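The same arithmetic spelled out (0.3 mol/L in the 10 ml batch, i.e. 0.003 mol):

$$
m = 192.124\ \mathrm{g\,mol^{-1}} \times 0.3\ \mathrm{mol\,L^{-1}} \times 0.01\ \mathrm{L} \approx 0.5764\ \mathrm{g}
$$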
2025/02/12

Made three, on glass plates.

Spin-coated 5 times, drying at 90°C between coats.

→ Intermediate drying at 300°C for 13 minutes.
@@ -21,6 +21,8 @@ date: "2023-08-24T22:38:07+0900"
 [[DIY半導体-実験ノート3]]

+[[DIY半導体-実験ノート4]]
+
 [[複製するメディアではなく、“刷られた装置”としてのコンピューター ─ 芸術とデザインの視点からのDIY半導体]]

 [[実験ベースの科学のフラジャイルさ]]
@@ -53,9 +55,9 @@ https://www.bookpark.ne.jp/cm/ieej/detail/IEEJ-ECT16039-PDF/
 ### Metal oxides

-[[酸化亜鉛系トランジスタ]]
+[[DIY酸化亜鉛系トランジスタ]]

 [[色素増感太陽電池]]
 ### Organic transistors

 [[Water-Gated n-Type Organic Field-Effect Transistors for Complementary Integrated Circuits Operating in an Aqueous Environment]]
content/DIY半導体の測定.md (new file)
@@ -0,0 +1,10 @@
---
date: 2025-02-04 13:31
---
#semiconductor

[[カーブトレーサー]]

[[ソースメジャーユニット]]

[[ピコアンメーター]]
content/DIY可能なトランペット.md (new file)
@@ -0,0 +1,101 @@
#windinstrument

Background research:

[[管楽器の3Dプリント]]

[[金管楽器のピストンバルブの自作]]

---

Like a Telecaster, the idea is a bolt-on assembly.

Main pipes in brass, the piston casing CNC-milled from wood, everything else 3D printed.

![[img/スクリーンショット 2025-04-06 162931.png]]

The yellow parts are handled in PETG or TPU.

![[img/スクリーンショット 2025-04-06 163225.png]]

Each material's shrinkage has to be measured to understand how printed parts will mate with the brass pipes.

The bores for valves 1, 2, and 3 need to be 13 mm inside diameter, but come out at roughly 12.7 mm.

Shrinkage comes to roughly 97.5% of the original.

Printing at 102.3% should land just right.
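One way to read that figure (my interpretation from the bore measurement above):

$$
\frac{13\ \mathrm{mm}}{12.7\ \mathrm{mm}} \approx 1.024 \quad (\text{i.e. a print scale of about } 102.3\%)
$$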
The mouthpiece receiver measures 69.95 mm where it should be 70 mm = 1.0007148, so essentially no shrinkage there.

Horizontal distance between bolts: 22.5/22.13 = 1.01671939

## Print settings

The main parts use eSUN matte PLA in "light grey", a bluish grey.

For some reason adhesion is poor with it: everything comes out slightly thin, and interlayer bonding doesn't look great either. Creality Print's Generic PLA profile sets Flow Rate to 95%, so I put it back to 100%.

The shrinkage compensation above is applied per model; beyond that, turn on scarf-joint seams wherever possible.

Also turn on top-surface ironing.

![[img/スクリーンショット 2025-04-06 200807.png]]
![[img/スクリーンショット 2025-04-06 200826.png]]

Turn on organic supports. If supports trigger on tiny overhangs they end up inside the mouthpiece receiver and the like, so set a 1 mm threshold.

![[img/スクリーンショット 2025-04-06 200834.png]]

It ends up looking roughly like this:

![[img/スクリーンショット 2025-04-06 201241.png]]

As an anti-warp measure, enable Outer Brim Only for the brim and place the models close enough that their brims fuse; overall bed adhesion becomes much more stable.

A shame that Adaptive Layer Height and Organic Support can't be used together...

[Organic support with variable height · Issue #9462 · prusa3d/PrusaSlicer](https://github.com/prusa3d/PrusaSlicer/issues/9462)

## CNC milling the casing

Using walnut.

Using the Roland MDX-40A at AMC. No ATC, so tools are changed by hand, diligently.

Tools used:

- 6 mm flat end mill
- 2 mm ball end mill (one that can cut to a depth of about 1.7 cm at 2 mm diameter)
- 3 mm drill (anything that makes an exact 3 mm hole is fine; the 2 mm ball end mill could probably do it too)

Notes for generating toolpaths from Fusion:

**Set the origin with G54, not a user coordinate system**

Stepdown 0.5 mm, spindle 15,000 rpm, feed F 2000 mm/min.

Isn't there a way to build this more simply with fewer tools?

Rather than splitting the casing down the middle, gluing up boards vertically and horizontally might also work.

If the bottom and top plates slide in, the cutting dimensions get complicated, so two through-screws might be fine instead (it's not a part you remove that often anyway).

If the board thickness works out to a standard stock dimension, machining gets far easier; depending on the case, even a laser might do.

The casing dividers need holes drilled through sideways, so... a jig and a hand drill? Actually, after gluing, the existing holes in the side plates can guide the drill straight through.

## Assembly

Pipe lengths:

- [x] 1st outer 45.55 mm
- [ ] 1st inner 55 mm
- [x] 2nd inner
- [x] 3rd outer 98 mm
- [ ] 3rd inner 70 mm
- [ ] Tuning slide inner 88 mm x2
- [x] Tuning slide outer 114.84 mm
- [x] Mouthpipe 202 mm
- [x] Bell leadpipe 92.5 mm
content/DIY酸化亜鉛系トランジスタ.md (new file)
@@ -0,0 +1,80 @@
---
title: 酸化亜鉛系トランジスタ
tags:
  - research
  - survey
  - semiconductor
date: "2023-10-07T15:16:13+0900"
---

## Policy

- NO vacuum equipment
- NO high temperature (up to ~500°C, ideally ~300°C, manageable with a hot plate for electronics work)
- NO exotic chemicals

![[img/スクリーンショット 2025-02-07 14.32.12.png]]

[Drying-temperature dependence and TFT characteristics of sol-gel-deposited zinc oxide thin films (2015)](https://confit.atlas.jp/guide/event-img/jsap2015s/11p-D1-10/public/pdf?type=in)

## Materials

### Substrate

Glass plate (a chemistry slide glass)
### Semiconductor

Spin-coat a [[酢酸亜鉛]]-based solution to make a [[酸化亜鉛]]-based transistor. The solvent is a water/ethanol (or IPA) mixture; the usual chelating agent is an ethanolamine, substituted here with citric acid.

Heating is assumed to be manageable at 300°C.

### Electrodes

Make do with conductive paste. It sinters at around 100°C; conversely, heating it to ~250°C burns and degrades it, which constrains the order of the process steps.

### Dielectric (gate insulator)

Spin-coat polyvinyl alcohol (PVA).

## Process

- Preparation
  - Make the electrode mask (screen)
    - Laser-cut screen stencils on the xtool look easiest
    - Interdigitated (comb) electrodes for source/drain
  - Prepare the semiconductor precursor solution
  - Prepare the dielectric solution

- Clean the substrate (ultrasonic bath, ideally hydrochloric acid or the like)
- Deposit the semiconductor
  - Spin coat
  - Intermediate drying
  - Sinter, cool
- Screen-print the source/drain electrodes
  - Heat-treat the conductive paste
  - At this point you can already shine UV on it and watch the current change
- Deposit the dielectric
  - Mask the electrodes (tape or similar)
  - Spin coat
  - Dry
- Screen-print the gate electrode
  - Take care not to destroy the dielectric
  - Heat-treat the conductive paste

## Other approaches

- Use an aluminum plate as the base, with its oxide film as the gate insulator. Easier?
  - According to [[Homemade Thin-Film Transistor Experiments#次にやること]], dielectric breakdown made it fail
  - The cause is unknown, but possibly aluminum's large expansion coefficient cracked the film
  - Still, there seem to be several ways to grow the oxide film, so it may not be impossible

- What about a liquid dielectric?
  - Could work if shaped so it doesn't touch the source/drain electrodes

## [[酸化亜鉛トランジスタ-サーベイ]]

## [[蜂蜜誘電体トランジスタ]]

## [[スクリーンプリントでのトランジスタ]]
content/Daria Mill.md (new file)
@@ -0,0 +1,6 @@
---
date: 2025-05-15 13:58
---
#person

Curator and research associate at the ZKM | Center for Art and Media Karlsruhe. Her research centers on the intersection of new media, art, science, and technology. She is also particularly interested in experimental approaches that treat art institutions as agents of change, questioning received assumptions and introducing more sustainable works and production methods. She has organized and run many large-scale exhibition projects in Germany and abroad, working in both physical and digital space.
content/Do we still need computer music.md (new file)
@@ -0,0 +1,16 @@
---
date: 2025-01-17 22:54
---
#scrap #computermusic

Do We Still Need Computer Music? [[Eric Lyon]] (Talk given at EMS 2006, Beijing)

[Do_We_Still_Need_Computer_Music.pdf](https://disis.music.vt.edu/eric/LyonPapers/Do_We_Still_Need_Computer_Music.pdf)

> A possible criticism of our instrumental definition of computer music is that it is stylistically agnostic.

> For this reason a distinction between category and genre is critical.

The premises of what I wrote in [[やや弱いコンピュータ音楽]] are all laid out here.

I actually came away with the opposite impression: defining computer music in a completely style-agnostic way seems to lead to the more productive discussion.
@@ -30,7 +30,7 @@ https://chci.pages.dev/aist-seminar
 I call myself a researcher in the "Civil Engineering of Music".
 This is a discipline that does not actually exist, but as the name suggests, instead of applying technology to music, it rethinks foundational technical infrastructure through musical practice.

-Concretely, I perform on self-built instruments, develop the music programming language "mimium", and design the programmable music software otopoiesis that I will talk about today.
+Concretely, I perform on self-built instruments, develop the music programming language "[[mimium]]", and design the programmable music software otopoiesis that I will talk about today.

 ## The history of music and programming

@@ -44,7 +44,7 @@ https://chci.pages.dev/aist-seminar
 Most of them extend their functionality by combining plugins, which apply effects to sound, with instruments, i.e. synthesizers that receive note information and output audio.

-Meanwhile, as software that likewise creates sound on a computer, there are programming environments for sound, exemplified by [[Cycling'74 Max]].
+Meanwhile, as software that likewise creates sound on a computer, there are programming environments for sound, exemplified by [[Max|Cycling'74 Max]].

 Visual programming environments like Max are popular, but many text-based languages such as [[SuperCollider]] and [[Sonic Pi]] exist as well.

@@ -201,7 +201,7 @@ https://github.com/bitwig/dawproject
 Older examples include OMF and AAF (audio/video data only), SMF (sequence data; a bundle of multiple MIDI streams), and so on.

-Program as a Format - MPEG-Structured Audio (a Csound-based sound-source distribution format)
+Program as a Format - [[MPEG4 Structured Audio]] (a [[CSound]]-based sound-source distribution format)

 ## About [[otopoiesis]]
content/Donald Norman.md (new file)
@@ -0,0 +1,4 @@
---
date: 2025-01-24 10:38
---
#person
content/Early Television Musium.md (new file)
@@ -0,0 +1,6 @@
---
date: 2025-05-13 15:04
---
#website

https://earlytelevision.org
content/EduroamのゲストID一覧をコピペしやすくする.md (new file)
@@ -0,0 +1,21 @@
---
date: 2025-03-26 13:48
---
#tips #snippets

Eduroam can issue multiple guest IDs in one batch, but the resulting ID and password list is hopelessly copy-paste-unfriendly (and since the passwords mix in symbols, transcribing by eye fails easily).
So: right-click → Inspect to open the dev tools, switch to the Console tab, and run the following.

ID list:

```js
// every 3rd table row holds an ID; pull the text out of its <tt> cell
$$("body > p:nth-child(9) > table > tbody > tr").filter((node,idx) => idx%3==0).map(node=> node.querySelector("td > tt").childNodes[0].textContent)
```

Password list:

```js
// the row after each ID row holds the password
$$("body > p:nth-child(9) > table > tbody > tr").filter((node,idx) => idx%3==1).map(node=> node.querySelector("td > tt").childNodes[0].textContent)
```

Copy each result and paste it into a spreadsheet or the like (commas come along, so strip them diligently).
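A possible refinement (an untested sketch, assuming the same page/table structure as the snippets above): pair each ID with its password as tab-separated lines, so the paste needs no cleanup. `copy()` is the dev-tools console helper that fills the clipboard.

```js
// Pair IDs and passwords into "id<TAB>password" lines for direct spreadsheet paste.
// Assumes the same table layout as the two snippets above.
const cellText = (node) => node.querySelector("td > tt").childNodes[0].textContent;
const rows = $$("body > p:nth-child(9) > table > tbody > tr");
const ids = rows.filter((_, i) => i % 3 == 0).map(cellText);
const passwords = rows.filter((_, i) => i % 3 == 1).map(cellText);
copy(ids.map((id, i) => id + "\t" + passwords[i]).join("\n"));
```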
content/Electroacoustic Music Studies Asia Network.md (new file)
@@ -0,0 +1,12 @@
---
date: 2025-06-10 16:07
---
#group

[6.Battier.pdf](https://data.jssa.info/paper/2012v04n01/6.Battier.pdf)

[Electroacoustic Music Studies Asia Network \[EMSAN\] | IReMus](https://www.iremus.cnrs.fr/en/base-de-donnees/electroacoustic-music-studies-asia-network-emsan)

Database:

[EMSAN: The Electroacoustic Music Studies Asia Network](http://emsan.lib.ntnu.edu.tw/about.jsp)
content/Epistemic Tools.md (new file)
@@ -0,0 +1,6 @@
---
date: 2025-01-29 14:36
---
#notion #computermusic

A computer-based instrument has its maker's epistemology of music embedded in it.
content/Esolangなシンセサイザー.md (new file)
@@ -0,0 +1,14 @@
---
date: 2025-06-06 12:29
---
#memo #idea

A bespoke language using lots of custom characters, like [[uiua]].

Ideally it can produce complex sound within a cap of about thirty characters.

Adding some BF-like program control structure would make it interesting.

It would be fun to build all sorts of variants: soft synth, VST, Web, hardware (a mechanical keyboard plus a small display, say). One reference point for the information density is sketched below.
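For reference (my illustration, not from the note itself): bytebeat one-liners already coax complex sound out of roughly 25 ASCII characters, so a glyph-dense esolang could plausibly go further within a thirty-character cap. A minimal JS rendering of a classic bytebeat formula:

```js
// A classic bytebeat formula: ~24 characters, surprisingly rich output.
const f = t => (t * (t >> 5 | t >> 8)) >> (t >> 16);

// Render 4 seconds of unsigned 8-bit samples at 8 kHz (bytebeat convention).
const rate = 8000;
const samples = new Uint8Array(rate * 4);
for (let t = 0; t < samples.length; t++) samples[t] = f(t) & 0xff;
// To listen, scale to [-1, 1] and copy into a Web Audio AudioBuffer.
```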
@@ -0,0 +1,14 @@
---
title: "Experiments in Art and Technology (E.A.T.): 女性アーティストの参画 — AWARE-日本"
url: "https://awarewomenartists.com/articles_japan/experiments-in-art-and-technology-e-a-t-%E5%A5%B3%E6%80%A7%E3%82%A2%E3%83%BC%E3%83%86%E3%82%A3%E3%82%B9%E3%83%88%E3%81%AE%E5%8F%82%E7%94%BB/"
date: "2025-05-13 16:52:43"
---
#scrap

# Experiments in Art and Technology (E.A.T.): The Participation of Women Artists — AWARE Japan

author: [[Daria Mill]]

https://awarewomenartists.com/articles_japan/experiments-in-art-and-technology-e-a-t-%E5%A5%B3%E6%80%A7%E3%82%A2%E3%83%BC%E3%83%86%E3%82%A3%E3%82%B9%E3%83%88%E3%81%AE%E5%8F%82%E7%94%BB/

>
content/Feminist Hackerspaces as Safer Spaces?.md (new file)
@@ -0,0 +1,10 @@
---
date: 2025-06-06 13:04
---
#scrap #feminism

[[Sophie Toupin]]

[Feminist Hackerspaces as Safer Spaces? | .dpi](https://dpi.studioxx.org/en/feminist-hackerspaces-safer-spaces)
content/FoxDot.md (new file)
@@ -0,0 +1,10 @@
---
date: 2025-05-22 17:55
---
#software #programming-language #python

A programming language written in [[Python]], structured as a [[SuperCollider]] client and aimed at [[ライブコーディング]].

Apparently it is no longer actively maintained.

[GitHub - Qirky/FoxDot: Python driven environment for Live Coding](https://github.com/Qirky/FoxDot)
content/Frugal innovation.md (new file)
@@ -0,0 +1,5 @@
---
date: 2025-06-09 09:30
---
#notion
content/GOSH.md (new file)
@@ -0,0 +1,9 @@
---
date: 2025-02-18 20:21
---
#openscience

A community advancing the open-sourcing of scientific instruments.

[Home - Gathering for Open Science Hardware](https://openhardware.science/)
content/GaudiLab.md (new file)
@@ -0,0 +1,8 @@
---
date: 2025-02-18 20:18
---
#diy-bio #openscience

[Welcome to » The GaudiLab](https://www.gaudi.ch/GaudiLabs/?page_id=2)
@@ -1,7 +1,7 @@
 ---
 date: "2023-10-13T12:44:10+0900"
 ---
-#paper #self-hosted #semiconductor
+#paper #self-hosted #semiconductor #solar #lowtech

 author: [[Kris De Decker]]
content/ICMC2025 ペーパーセッション聴講メモ.md (new file)
@@ -0,0 +1,198 @@
---
date: 2025-06-09 09:25
---
#memo

## Interactive Music Co-Creation with an Instrumental Technique-Aware System: A Case Study with Flute and Somax2

[[Somax2]]

## Prototyping Digital Musical Instruments in Virtual Reality

Prototyping inside VR. It starts from the premise that prototyping in physical space generates lots of waste, which makes me wonder: is that really true?

No tactility is rough. Well, of course.

[[Frugal innovation]], I see.

Magnets attached to the Quest controllers give a kind of pseudo-haptics.

Sketch → modeling → audio design → fabrication (with plenty of back-and-forth in practice).

What they use:

- [[TinkerCad]]
- [[PureData]] / [[MetaSounds]] / [[Max]]
- [[Unity]] & [[LibPd]]
- [[Steam Audio]] for spatialization
- Meta Quest 2

For example, a demo instrument with an accelerometer and buttons: build the 3D model, then try it in VR space first.

Experiment with 6 participants; the Pd patches and models are handed out as templates beforehand.

---
## PyChiro: Real-Time Gesture Recognition System Supporting the Kodály Chironomic Method in Music Education, Therapy, and Performance

A camera-based hand-gesture interaction system.

The [Kodály method](https://en.wikipedia.org/wiki/Kod%C3%A1ly_method), based in Hungary.

Ah, so that's "[[コダーイ]]".

[コダーイ・コンセプト – 日本コダーイ協会](http://kodaly.jp/concept/)

Apparently there are hand signs representing pitch.

[The Kodaly Method: Fun hand-signing Games for young Musicians](https://mobilemusicschool.ie/kodaly-method-music-games/)

The octave changes with the angle of the arm, and that is detected as well.

## VibroTempo: Feel the Beat with Vibrational Metronomes for Musicians

A vibration metronome.

It can beat with accents; the goal seems to be music education.

With accented beats and the like, the results diverge from a traditional metronome.

## ZAP!: Bio-Inspired Large-Scale Multimedia Installation

A sound installation inspired by electric eels.

[[バイオミミクリー]]

## Music102: An D12-equivariant transformer for chord progression accompaniment

Using group theory to represent chord progressions.

Schur's lemma.

[2410.18151 Music102: An D12-equivariant transformer for chord progression accompaniment](https://arxiv.org/abs/2410.18151)

In the end, I suppose it amounts to extracting a chord progression from a melody?
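A quick illustration of that D12 idea (mine, not the paper's code): the dihedral group D12 consists of the 12 transpositions and 12 inversions acting on the pitch-class circle Z12, which is the symmetry the model is built to respect.

```js
// D12 acting on pitch classes 0-11: rotations (transpositions) and
// reflections (inversions) of the pitch-class circle.
const T = n => pc => (pc + n) % 12;        // transposition by n semitones
const I = n => pc => (n - pc + 12) % 12;   // inversion about index n

// Example: the C major triad {0,4,7} inverted about 0 gives {0,8,5},
// an F minor triad, so chord quality transforms predictably under the group.
console.log([0, 4, 7].map(I(0)));          // [ 0, 8, 5 ]
console.log([0, 4, 7].map(T(2)));          // [ 2, 6, 9 ] (D major)
```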
---

## AI music panel

### Central Conservatory of Music (CCOM)

They have a whole center just for AI music (apparently with six labs inside).

No competing with that scale...

They do both model building and artistic production; there's quite a bit of robot music and the like too.

[CENTRAL CONSERVATORY OF MUSIC](https://en.ccom.edu.cn/)

Apparently they also took the top spot in the 2023 Music Demixing Challenge.

A diffusion model estimating left-hand string positions from audio (sounds handy for VTubers and such).

### Lamberto Coccioli

Music and Technology at Royal Birmingham Conservatoire (RBC), Birmingham City University

The ontological paradox of artificial intelligence in musical composition.

What do we make generative AI music for? What is "making" in the first place?

Framing AI through technological determinism.

Perhaps we are building generative AI without fully grasping the impact it can have.

A radical other form of agency.

The suggestion that they might be animist entities (fairies?).

AI as machine vs AI as person.

Paradoxes:

- Current AI is a product of capitalism
- It bears no relation to human cognition
- AI exploits and automates creativity but cannot be controlled

"Controlled emergence"

Restricting training data.

Quite radically on the regulation side.

Biocultural Diversity

It resembles the question of how to counter language extinction.

"Safeguarding creativity for future generations"

### Carlos Arana

The University of Buenos Aires / Berklee College

AI in Music: A Pedagogical Approach for Productive and Responsible Music

The point is that AI is usable at many stages of the music work lifecycle (consumption included).

Pedagogical approach: task - technology - application - evaluation

A 4-week course, "AI for music and audio" (Berklee Online).

For source separation, say: learn the principles end to end, also use the actual apps, then evaluate how to use them. And they do this across every area.

Bringing a step-back view of LANDR, Suno, Magenta and the like into education is interesting.

They even cover talent/hit-song detection.

The ethics and law side is covered as well.

### Marc Battier

Knowledge is cumulative / implicit knowledge is easily lost.

He tried asking who served as assistant on Stockhausen's Kathinkas Gesang, and who François Bayle's assistant was; naturally, it returned wrong answers.

Not just unreliable: misleading.

What is the tacit knowledge in computer music composition, and how far can it be structured?

EMSAN: [[Electroacoustic Music Studies Asia Network]]

Built a database collecting electronic music made in Asia.

[EMSAN: The Electroacoustic Music Studies Asia Network](http://emsan.lib.ntnu.edu.tw/about.jsp)

### Georg Hajdu

Healing Soundscape

Style-agnostic healing music, hmm...

Gestalt psychology.

"Bouba-fication" of sound (something like rounding off the edges, I suppose...)

𰻞

So, in short, they're using latent space.

### Kenneth Fields

University of China Academy

Chronotechnics

Seems to have worked on network music for a long time.

Doesn't seem related to AI at all... why was he picked?

Netronomia

[NetronomiaII by Ken Fields](https://rdmc.nottingham.ac.uk/handle/internal/11989)
content/IEZAフレームワーク.md (new file)
@@ -0,0 +1,14 @@
---
date: 2025-03-08 13:41
---
#gameaudio

Organizing the functions of game audio along two axes, Diegetic vs. Non-Diegetic and Activity vs. Setting, yields four quadrants (assignments per the linked framework):

- Interface (non-diegetic x activity)
- Effect (diegetic x activity)
- Zone (diegetic x setting)
- Affect (non-diegetic x setting)

[IEZA: a framework for game audio – Captivating Sound](https://captivatingsound.com/ieza-framework-for-game-audio/)
@@ -0,0 +1,60 @@
---
cssclass: research-note
type: "journalArticle"
author: "Mathews, Max; Roads, C"
title: "Interview with Max Mathews"
publication: "Computer Music Journal"
date: 1980-01-01
citekey: Mathews1980
tags:
  - research
  - journalArticle
---

> [!Cite]
> Mathews, Max, and C Roads. 1980. "Interview with Max Mathews". _Computer Music Journal_ 4 (4): 15–22.

>[!Synth]
>**Contribution**::
>
>**Related**::
>

[online](http://zotero.org/users/12014264/items/7N7TIZHU) [local](zotero://select/library/items/7N7TIZHU) [pdf](file:///Users/tomoya/Zotero/storage/ZAQ37PDB/Mathews,%20Roads%20-%201980%20-%20Interview%20with%20Max%20Mathews.pdf)
[pdf](file:///Users/tomoya/Zotero/storage/GFPCD4VD/full-text.pdf)

>[!md]
> **FirstAuthor**:: Mathews, Max
> **Author**:: Roads, C
> **Title**:: Interview with Max Mathews
> **Year**:: 1980
> **Citekey**:: Mathews1980
> **itemType**:: journalArticle
> **Journal**:: *Computer Music Journal*
> **Volume**:: 4
> **Issue**:: 4
> **Pages**:: 15-22

> [!LINK]
>
> [Attachment](file:///Users/tomoya/Zotero/storage/ZAQ37PDB/Mathews,%20Roads%20-%201980%20-%20Interview%20with%20Max%20Mathews.pdf)
> [PDF](file:///Users/tomoya/Zotero/storage/GFPCD4VD/full-text.pdf).

> [!Abstract]
>.
>
# Notes

>.

# Annotations%% begin annotations %%

%% end annotations %%

%% Import Date: 2025-01-20T16:49:43.891+09:00 %%
content/Karen Collins.md (new file)
@@ -0,0 +1,6 @@
---
date: 2025-03-08 13:47
---
#person

[Game Sound](https://mitpress.mit.edu/9780262537773/game-sound/)
content/Li Jingyi.md (new file)
@@ -0,0 +1,8 @@
---
date: 2025-05-15 13:52
---
#person

[Jingyi Li](http://jingyi.me)

[ORCID](https://orcid.org/0000-0002-8253-887X)
content/Ludo-Musica.md (new file)
@@ -0,0 +1,10 @@
---
date: 2025-03-08 15:08
---
#gameaudio

An online game exhibition led by [[尾鼻崇]] and others.

[Ludo-Musica Ⅲ](https://ludomusica.net/)

The texts remain archived.
content/Ludomusicology.md (new file)
@@ -0,0 +1,6 @@
---
date: 2025-03-08 13:16
---
#research

The academic field referring to [[ゲームオーディオ]] in a broader sense.
content/MIDI.md (new file)
@@ -0,0 +1,4 @@
---
date: 2025-01-24 10:36
---
#stub
content/MPEG4 Structured Audio.md (new file)
@@ -0,0 +1,30 @@
---
date: 2025-01-29 14:41
---
#computermusic

[The MPEG-4 Structured Audio Book](https://john-lazzaro.github.io/sa/book/index.html)

A Structured Audio Orchestra Language based on [[Csound]].

Envisioned sending timbres, i.e. synthesizer algorithms, over the internet, the way MIDI sends notes.

It consists of two languages: SASL, the score language, and SAOL, the orchestra language.

[Saol: The MPEG-4 Structured Audio Orchestra Language](https://quod.lib.umich.edu/cgi/p/pod/dod-idx/saol-the-mpeg-4-structured-audio-orchestra-language.pdf?c=icmc;idno=bbp2372.1998.488;format=pdf)

[SAOL: The MPEG-4 Structured Audio Orchestra Language on JSTOR](https://www.jstor.org/stable/3680734)

The syntax is closer to C than Csound's is.

```
instr beep(pitch,amp) {                    // make a beep
  table wave(harm,2048,1);                 // sinusoidal wave function
  asig sound;                              // ‘asig’ denotes audio signal
  ksig env;                                // ‘ksig’ denotes control signal

  env = kline(0,0.1,1,dur-0.1,0);          // make envelope
  sound = oscil(wave, pitch) * amp * env;  // create sound by enveloping an oscillator

  output(sound);                           // play that sound
}
```
@@ -4,7 +4,12 @@
 One of the [[MUSIC N#MUSIC IV|MUSIC IV]] derivatives, used at [[IRCAM]] and [[CCRMA]]. Succeeded by [[MUSCMP]].

-Like [[MUSIGOL]], it defines an original language (roughly an [[ALGOL]] subset plus signal-processing features), so it seems to sit in a somewhat different lineage from the rest of the MUSIC-N family
+Like [[MUSIGOL]], it defines an original language (roughly an [[ALGOL]] subset plus signal-processing features), so it seems to sit in a somewhat different lineage from the rest of the MUSIC-N family.
+
+> It is worth pointing out that the starting point of MUS10 was an existing ALGOL parser, modified for music synthesis. We shall see several examples of this later in which the language designer simply took an existing language compiler and modified it to suit musical requirements. This is a very simple but effective way to start a language design. ([Programming languages for computer music synthesis, performance, and composition | ACM Computing Surveys](https://dl.acm.org/doi/10.1145/4468.4485), p248)

 Apparently UGens can also be read out at the slower control rate
@@ -10,7 +10,7 @@ date: 2025-01-15 16:33
 ## MUSIC I

-Processing on the order of applying an envelope to a triangle wave.
+Processing on the order of applying an envelope to a triangle wave. See also [[The Computer Music Tutorial, second edition - Curtis Roads#Notes]]

 ## MUSIC II

@@ -58,4 +58,4 @@ Built for the IBM 360.
 Built for the [[PDP-11]]. Introduces the ar/kr distinction.

-Leads to the later [[CSound]].
+Leads to the later [[Csound]].
@@ -0,0 +1,314 @@
---
date: 2025-01-20 17:39
---
#research #computermusic #scrap

[Max Mathews Full Interview | NAMM.org](https://www.namm.org/video/orh/max-mathews-full-interview)

Transcription is powered by [Notta](https://notta.ai). I hand-corrected proper nouns such as personal and institution names, but several errors probably remain.

## AI summary

This transcript is a wide-ranging interview with a pioneer of computer music and digital sound synthesis. Mathews recounts his time at Bell Labs and later at Stanford, tracing the path from his earliest music programming to the development of influential music software. He recalls his early musical experiences, from playing the violin to discovering the emotional power of music during Navy service. He details the evolution of the Music programs from Music 1 to Music 5, highlighting key innovations such as the block-diagram compiler and the wavetable oscillator. He explains the significance of his Bell Labs work on speech coding and digital sound processing, covers the development of real-time performance instruments such as the Radio Baton, the impact of FM synthesis, and the evolution of computer-music technology from large mainframes to modern laptops, and touches on the history and research culture of Bell Labs and on key figures such as John Chowning and Pierre Boulez. Throughout the interview, the importance of understanding human perception in developing music technology is stressed.

### Chapters

00:00:11 Musical background and education growing up

The speaker talks about his early musical experiences: learning the violin in high school and continuing to play in orchestras and chamber groups. He also recalls his Navy service in Seattle, where he discovered the emotional power of music in a listening room stocked with shellac and vinyl records.

00:01:24 Education and early career

The speaker details his path from Nebraska through the Navy to Caltech for electrical engineering, and finally to MIT, where he encountered computers and analog computing systems.

00:10:48 Development of the Music programs

The speaker describes the evolution of the Music programs: first the limitations of Music 1, then Music 2's four-voice system and wavetable oscillator, Music 3's block-diagram compiler, and finally Music 5's FORTRAN implementation.

00:26:10 Bell Labs and acoustics research

The speaker outlines his work at Bell Labs, focused on speech-coding research, digital tape technology, and systems that compress speech and music for transmission.

00:51:21 FM synthesis and Stanford

The speaker discusses the significance of FM synthesis and John Chowning's contributions, the development of the Samson Box at Stanford, and the evolution of music technology from large mainframes to modern laptops.

00:57:47 Instruments and performance technology

The speaker explains how the Radio Baton and the Conductor program developed as instruments for live performance, detailing the evolution from mechanical to radio technology.

00:59:19 Mentions continuing to add features to the Radio Baton system.

00:59:05 The speaker mentions the ongoing collaboration with Tom Oberheim on the Radio Baton project.

01:01:11 The speaker says that after retirement he stays involved at Stanford two days a week.

00:17:50 The speaker stresses how important it is for musicians to understand the physical parameters of waveforms in order to create sound digitally.

---
## 文字起こし
|
||||
|
||||
Interviewer 00:06
|
||||
|
||||
Thank you for having a few minutes for me, I do appreciate it.
|
||||
|
||||
|
||||
[[Max Mathews|Max V. Mathews]] 00:09
|
||||
|
||||
Okay.
|
||||
|
||||
|
||||
|
||||
Interviewer 00:11
|
||||
|
||||
I think it's a good place to start, if you don't mind. It's just a little bit of background on yourself. And tell me the role of music in your life when you were growing up.
|
||||
|
||||
|
||||
|
||||
Max V. Mathews 00:23
|
||||
|
||||
Two things, I learned to play the violin, not well, and I still don't play it well when I was in high school. And I continued to play the violin with school orchestras and chamber groups, and still do that. It's a great joy in my life.Then at the end of the Second World War, I was in the Navy in Seattle, and the good people of Seattle had set up a listening room where you could go and listen mostly to shellac 78 RPM records, but a few vinyl 78 RPM records. And so I realized at that time that music had an emotional and beautiful and pleasurable content, and that also has been a great factor in my life. So those were the two places where I got into music.
|
||||
|
||||
|
||||
Interviewer 01:22
|
||||
|
||||
Now where did you grow up?
|
||||
|
||||
|
||||
Max V. Mathews 01:24
|
||||
|
||||
I grew up in Nebraska, and when I was 17, I guess I enlisted in the Navy as a radio technician trainee. Now we were called radio technicians, but we were really trained to repair radars, but the word radar was secret at that time. And so I finished school there and then went to Seattle and helped commission a destroyer, and then we shot the guns and shook the boat down and went back to Seattle, and then I was mustered out because the war had ended and VJ Day was over. I met Marjorie in San Francisco at the radar training school on Treasure Island, and we hit it off immediately. So I stayed in the West Coast, went to school at Caltech, studied electrical engineering there because I was in love with radar circuits. I wish I had studied physics there, but nevertheless it's a wonderful school.And then I went on to MIT and got introduced to computers. Those days analog computers were the useful computers, digital computers were still being developed, and I sort of loved these big complicated systems, and so we solved the kinds of problems that analog computers could solve, and that was my schooling.
|
||||
|
||||
Interviewer 03:03
|
||||
|
||||
Very interesting. Can you give me a little background on your family? Did your parents also grow up in Nebraska?
|
||||
|
||||
Max V. Mathews 03:11
|
||||
|
||||
Yes, my parents were both born there and grew up there. They were both teachers. My father was the principal of the teachers' training high school in Peru. There was a little teachers' college there. But what he really enjoyed was teaching the sciences. So he taught physics and biology and chemistry. And he let me play in his laboratory as well as in his workshops. And that was another thing that set the course of my life. I still enjoy working in a workshop and I still enjoy the sciences very much.
|
||||
|
||||
Interviewer 04:00
|
||||
|
||||
Very interesting. Well, what were the computers like when you first started getting interested in that?
|
||||
|
||||
Max V. Mathews 04:10
|
||||
|
||||
Well, the one computer that we used most, and this was to develop counter missiles to protect mostly against air attacks at that time. And this was a combination of an electromechanical system. So the integrator on the computer was a mechanical integrator, but the other components, the adders and more simple operations were done electronically. Then operational amplifiers were designed and came along at that time. And so then most of the simple integrations were taken over by the operational amplifier feedback circuit that still does that job. And only complex integrations of fairly nonlinear processes had to be done with the mechanical components.So the computer itself filled a large room full of relay racks that held both the analog components and the mechanical components. Now, there were a lot of interconnecting that had to be done at a patch field. The question would be, had you done it correctly, would it give the right solution to the original problem? And so we needed check solutions, and you could integrate the solution on a Marchant mechanical multiplying calculator machine. If you had a group of five or ten, I think in those days it was entirely women, and they worked for about a month to calculate one solution, whereas the analog computer, of course, would turn out a solution in a few seconds. So we would get these digital integrations and compare it with the analog result, and then figure out what mistakes we'd made and corrected, and then go ahead and study a lot of different conditions.When I came to Bell Labs in 1955, I started working and always worked in acoustic research there, and our main job was developing new telephone, well, new speech coders that really would compress the amount of channel that was needed to transmit the speech over expensive things like the transatlantic cable. And in the beginning, people had a number of ideas on how the encoding might work. Pitch period repeating was one of them. Channel vocoder processing was another of them. Format vocoders was yet a third, and in order to try these things, one had to build a sample model of them, and this was very complicated. The vacuum tubes were the things that we had to design and work with in those days. The transistor had not yet become practical. So it might take several years to design a trial equipment, and usually it didn't work. So then you would go back and do it again. And I thought that, well, I should say that this was just the time that computers were becoming powerful enough to do a digital simulation of many things. And in the case of speech, the essential thing was a way of getting speech into the computer and then getting it back out after you had processed it to see what it sounded like. And the key element that made that possible was not the computer, digital computer itself. You could run the computer for a few days to make a few minutes of speech. But the crucial thing was the digital tape recorder, which could take the output of an analog to digital converter at speech rates.
|
||||
|
||||
Max V. Mathews 09:00
|
||||
|
||||
In those days, it was 10,000 samples per second. Today it's 44,000 samples a second for CD music and more for other things. Anyhow, take these rapid flow of samples coming out and record them on a digital tape that then could be taken to the computer to be the input, slow input. And the computer would write a digital tape and you could take this back and play it back again at the 10,000 samples per second so you could hear the thing at speech frequencies. And this digital tape-based A to D computer input and output was the equipment that we built at Bell Labs that made this possible and was a completely successful device for speech research. And most of the modern coders came from this. And now, of course, as you know, it's not only digitized speech is not only used for research, it's the way that almost all information is transmitted. The reason being that digital transmissions are very rugged and number is a number and you can hand it on from one medium to another and from one company to another. And as long as you use the proper error correcting codes why if it goes to Mars and back you'll still get the correct numbers. So that's how the world works today.
|
||||
|
||||
Interviewer 10:38
|
||||
|
||||
Very interesting. Max, when did it first come into your mind that computers and music could be put together?
|
||||
|
||||
Max V. Mathews 10:48
|
||||
|
||||
I've forgotten the exact date, but it was in 1957, and my boss, or really my boss's boss, John Pierce, the famous engineer who invented satellite communication, and I were going to a concert. We both liked music as an art. The concert was a local pianist who played some compositions by Schnabel and by Schoenberg. At the intermission we thought about these, and we thought that the Schoenberg was very nice and that the Schnabel was very bad, and John said to me, "Max, I bet the computer could do better than this. Why don't you either take a little time off from writing programs for speech compression, or maybe burn the midnight oil, and make a music program?" And as I said at the beginning, I love to play the violin, but I'm just not very good at it, and so I was delighted at the prospect of making an instrument that would be easier to play, at least in a mechanical sense, and I thought the computer would be that. So I went off and wrote my Music 1 program, which actually made sound, but horrible sound, so that you couldn't really claim it was music. But that led to Music 2 and eventually Music 5, which did make good music. I'm not a musician in any real sense; I consider myself a creator and an inventor of new musical instruments, computer-based instruments. But my ideas did make an impact on musicians and composers, and I think it started, or it was one of the startings of, the field of computer music.

Interviewer 13:05

Absolutely. Tell me about Music 2. I'm sort of curious about that.

Max V. Mathews 13:10

Well, Music 1 had only one voice and only one wave shape, a triangular wave, an equal slope up and an equal slope down. And the reason was that the fastest computer at the time, the [[IBM 704]], was still very slow. The only thing it could do at all fast was addition. And if you think about it, each sample could be computed from the last sample by simply adding a number to it. So the time was one addition per sample. Well, the only things the composer had at his disposal were the steepness of the slope, how big the number was, which determined how loud the waveform was, and the pitch that you were going to make, and the duration of the note. So that wasn't very much, and you didn't have any polyphony there. So they asked for a program that could have more voices, and I made one with four voices, and I made one where you could have a controlled wave shape, so that you could get different timbres, as much as the wave shape contributes to the timbre.

Now, in a computer, calculating a sine wave, or a damped sine wave, or a complicated wave is pretty slow, especially in those days. So I invented the wavetable oscillator, where you would calculate one pitch period of the wave and store it in the computer memory, and then read this out at various pitches, so that this could be done basically by looking up one location in the computer memory, which is fast. And I also put an amplitude control on the thing by multiplying the wave shape by a number. So this cost a multiplication and a couple of additions, so it was more expensive. But by that time, computers had gotten maybe 10 or 100 times as fast as the first computer, so it really was practical. So that was Music 2. And something that most listeners would call music came out of that, and some professional composers used it.

But they always wanted more. In particular, they didn't have things like a controlled attack and decay, or vibrato, or filtering, or noise, for that matter. So it was a perfectly reasonable request. But I was unwilling to contemplate adding these kinds of devices, one at a time, to my music program. So what I consider my really important contribution, which still is important, came in Music 3. And this was what I call a block diagram compiler. I would make a block, which was this waveform oscillator, and it would have two inputs, one for the amplitude of the output and the other for the frequency of the output, and it would have one output. And I would make a mixer block, which could add two things together and mix them. And I made a multiplier block, in case you wanted to do simple ring modulation. And I made a noise generator. Essentially, I made a toolkit of these blocks that I gave to the musician, the composer, and he could interconnect them in any way he wanted, to make as complex a sound as he wanted. And this was also a note-based system, so that you would tell the computer to play a note.
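As a rough illustration of the wavetable idea Mathews describes, here is a minimal sketch in Python; the table size, sample rate, and test note are illustrative choices, not values from the Music programs:

```python
import math

SAMPLE_RATE = 10000          # samples/s, the rate Mathews quotes for the speech-era gear
TABLE_SIZE = 512             # one stored pitch period; size chosen for illustration

# Compute one period of the waveform once and store it. Here it is a sine;
# the point of the scheme is that the table can hold any shape the composer wants.
wavetable = [math.sin(2 * math.pi * i / TABLE_SIZE) for i in range(TABLE_SIZE)]

def oscillator(freq_hz, amp, n_samples):
    """Read the stored period back at varying speed: per sample, one table
    lookup plus one multiply, instead of recomputing the waveform."""
    phase = 0.0
    increment = freq_hz * TABLE_SIZE / SAMPLE_RATE  # table steps per output sample
    out = []
    for _ in range(n_samples):
        out.append(amp * wavetable[int(phase) % TABLE_SIZE])
        phase += increment
    return out

# 0.1 s of a 440 Hz tone at half amplitude
samples = oscillator(440.0, 0.5, SAMPLE_RATE // 10)
```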
Max V. Mathews 17:50

And you would give the parameters that you wanted the computer to read for that note. You almost always specified the pitch and the loudness of the note. But you could have an attack-and-decay block generator included in this, and you could say how fast you wanted the attack and how long you wanted the decay to last, or you could even make an arbitrary wave shape for the envelope of the sound. And so this really was an enormous hit, and it put the creativity, not only for composing the notes, the melodies, or the harmonies that you wanted played, on the musician, on the composer, but it gave him the additional task of creating the timbres that he wanted.

And that was a mixed blessing. He didn't have the timbres of the violin and the orchestral instruments, which he understood, to call upon. He had to learn how timbre was related to the physical parameters of the waveform. And that turned out to be an interesting challenge for musicians, one that some people learned to do beautifully and others will never learn.

The man who really got this started at the beginning was [[Jean Claude Risset]], a French composer and physicist who came to Bell Labs and worked with me. It was one of my great strokes of luck, and a great pleasure, that he was around. He made a sound catalog that showed how you could create the sounds of various instruments, and sounds that were interesting but were definitely not traditional instruments. And that work still goes on. Risset is coming here to give some lectures at Stanford on April 3rd. He'll be here for the entire spring quarter.

Interviewer 20:03

Hmm, very interesting.

Max V. Mathews 20:06

But to finish up this series, that got me to Music 3. Along came the best computer that IBM ever produced, the IBM 704... 7094, excuse me. It was a transistorized computer, it was much faster, and it had quite a long life; they finally stopped supporting it in the mid-1960s, I guess. I had to write Music 4, which was simply reprogramming all the stuff I had done for the previous computer for this new computer, a big and not very interesting job. So, when the 7094 was retired and I had to consider another computer, I wrote Music 5, which is essentially just a rewrite of Music 3 or Music 4, but in a compiler language. FORTRAN was the compiler language that was powerful and existed in those days. So when the next generation beyond the Music 5 computers came along, the PDP-10 being a good example of a computer that ran well with music, I didn't have to rewrite anything. I could simply recompile the FORTRAN program, and that's true today. Now, the most direct descendant of Music 5 is a program written by [[Barry Vercoe]], who's at the Media Lab at MIT, and it's called [[Csound]]; the C in Csound stands for the C compiler. Now, you're asking about Bell Labs, and many wonderful things came out of Bell Labs: Unix, and of course Linux, and now the OS X operating system for Macintosh, all started at Bell Labs. And the most powerful compiler, and I think the most widely used compiler, was also created at Bell Labs. It was called the C compiler, A and B were its predecessors, and C was so good that people stopped there, and now that's it for the world. Every computer has to have a C compiler now, whether it's a big computer or a little tiny DSP chip. So that's where that came from.

Interviewer 23:03

Very interesting. You had mentioned the envelope before, and I just wonder, were there other applications for that before the music programs?

Max V. Mathews 23:18

Other applications for what?

Interviewer 23:22

Well, for the process of, like, the use of envelope and pitch changes and...

Max V. Mathews 23:29

Ah, well, most of that is specific to music. Now, there are plenty of speech compression programs, and there are also music compression programs, and they make use of many ways of compressing sound. But I think the most interesting and most important today is compression of speech and music based on a property of the human ear, called masking. If you have a loud sound and a very soft sound, the loud sound will make it completely impossible to hear the soft sound; you won't hear it at all. And in fact, if you have a component in a sound, let's say a frequency band, which is loud, and the adjacent frequency band is very soft, why, you can't hear the soft frequency band. So that means, as far as speech coding goes, that you only have to send information to encode the loud things, and you do not have to send any, or very little, information to encode the soft things that are occurring while the loud things are happening. This is one of the important factors in MP3 and in the speech coders that enable us to send, record, and play back good music and good speech with very little bandwidth. It's how we send speech over Skype and other devices that send it over the Internet, entirely digitally and without an enormous bandwidth. So I've forgotten the question that I was answering there, but anyway, this is one of the useful directions that has come out of acoustic research in the last decades.
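A toy sketch of how a coder can exploit masking, purely illustrative; the 20 dB margin and the band levels are invented numbers, not anything from MP3's actual psychoacoustic model:

```python
# Toy band-dropping illustration of masking: skip any band drowned out by a
# much louder neighbour. Real codecs (MP3, AAC) use far more elaborate
# psychoacoustic models; the 20 dB margin here is arbitrary.

def audible_bands(band_levels_db, margin_db=20.0):
    """Return indices of bands worth encoding: a band is dropped when an
    adjacent band is louder than it by more than `margin_db`."""
    keep = []
    for i, level in enumerate(band_levels_db):
        neighbours = band_levels_db[max(0, i - 1):i + 2]
        if level >= max(neighbours) - margin_db:
            keep.append(i)
    return keep

# Band 2 is loud; bands 1 and 3 sit next to it and fall below the margin.
print(audible_bands([40.0, 35.0, 90.0, 50.0, 45.0]))  # -> [0, 2, 4]
```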
Interviewer 26:01

That's very interesting. Could you give us a little information, the background on Bell Labs and some of the key players?

Max V. Mathews 26:10

I can give you information about the most important players there, which were the members of the research department at Bell Labs. AT&T got started, based on a patent of Alexander Graham Bell, as a telephone network, and there was a lot of technology needed to implement Bell's patent. So AT&T set up a scientific and technical group, originally in New York City, to do this, and that became a separate sub-company owned by AT&T called Bell Telephone Laboratories. It grew to have a number of different parts, one of which was research, and that was a fairly small part of the company. Most of the people were in the development areas, which took the research ideas and converted them into products that were then supplied to the telephone companies. Originally, and almost to the end, the research department consisted entirely of PhDs, usually in the fields of physics and mathematics; gradually some chemical departments were added, but it was a very select group.

At that time, the telephone system was a regulated monopoly, so there was only one telephone company in almost the entire country. That made sense, because there was no real reason for having two networks of wires connecting the houses together, and that was a very expensive part of the system. This then became a great source of income, and a very small portion of this income financed the research department. The research department didn't directly try to do things that would make profits; rather, it tried to do things that were useful in the world of communication. They had a great deal of freedom in deciding what they thought would be useful.

The sort of golden age of research at Bell Labs, at least on my horizon, started with the invention of the transistor to replace vacuum tubes for amplifying signals. This was done by what we call solid-state physicists, physicists who understand how crystal materials interact with electrons, and how you can make amplifiers and get controlled voltages out of these. Then acoustic research was set up to apply the technology and to understand how people's ears work, what they need to understand speech, what they need to like speech, and what's dangerous about sounds if they're too loud. The threshold of hearing and basic things about human hearing were part of that group's work.

Now, the golden age of research at Bell Labs started with the idea, which Bell and his associates had, that one should support a research group with an adequate amount of money. But it continued with one man, William O. Baker, who was the Vice President of Research. He both maintained the very selective standards of the people in the group and guarded, very, very zealously, their freedom of choice in how they would use the money and what they would do research on. He insisted that AT&T provide him with the money to run the research department without strings attached, and he and his associates would decide how they would spend this money.

Finally, he kept the size of the research group very limited. When I went to Bell Labs in 1955, there were about 1,000 people in the research department, and Bell Labs was about 10,000.

Max V. Mathews 32:10

When I left in 1987, there were still about 1,000 people in the research department. The rest of Bell Labs had about 30,000 people, so he insisted that everyone use their resources wisely and not try to grow. This lasted until the Consent Decree in about 1980, which broke up the Bell System into seven operating areas, separate companies, and a company called AT&T, which would contain Bell Labs, the research part, and also Western Electric, the manufacturing arm that would provide telephone equipment to the operating companies, as it always had. But it opened the whole thing to competition, and also, by that time, digital transmission was coming in. This is in contrast to analog transmission of sound, which is very fragile: if you want to send a conversation from San Francisco to New York or to Paris by analog, you really have to send it over carefully controlled analog equipment, which really means all the equipment needs to be run by one company. But when digital things came along, you could pass the digits on between many, many companies in many, many ways. So essentially, the telephone research lab no longer had the support that it did under the controlled monopoly, and it was no longer possible really to support this group. It's expensive to run even a thousand people; the budget was something like $200 million a year. So that's my view of research in that part of Bell Labs. It was a wonderful time. There was, of course, in the Second World War and afterwards, a strong military research and development group at Bell Labs, and things like the Nike anti-aircraft missile were developed there, and many other things. Underwater sound was another branch of the military research. I think the military research actually still goes on. Bell Labs eventually split up and became Lucent, which is the name you probably know it by. And now it's amalgamated with the French company Alcatel, so it's Alcatel-Lucent. And it's no longer limited to working in the field of communications, as the original AT&T was. As a regulated monopoly, AT&T could not work in other fields. It was allowed to work in the movie field, though, and developed sound techniques for movie film in the 1920s.

Interviewer 36:26

Was it still in New York when you joined them?

Max V. Mathews 36:29

No, it had moved. Well, they still had the West Street laboratories in New York, although they subsequently closed them, maybe in 1960. But its central office was in Murray Hill, New Jersey, about 30 miles west of New York City, which could communicate with New York City easily by train. AT&T's headquarters at that time was still in New York City. And then it had other facilities in New Jersey, primarily at Holmdel, which was about 50 miles south of Murray Hill, and Whippany, about 10 miles north. But it had other laboratories, connected more with products, near Chicago and in Indiana, and it became more diversified, which was a problem.

Interviewer 37:35

How so?

Max V. Mathews 37:36

Oh, just the fact that it's a lot easier to think of something new by going to lunch with your friends and talking with them than it is to call them up on the telephone in Chicago from Murray Hill.

Interviewer 37:59

Do you think, based on what you were doing and what others were doing at Bell Labs, that it is correct to say that what [[Bob Moog]] and [[Don Buchla]] were doing was the first in its field for synthesized music?

Max V. Mathews 38:22

Well, saying what's first is always problematic, and I don't much try to speculate there. The thing that was interesting was that Moog and Buchla and I all three developed what I called a block diagram compiler, although compiler is not the right word in their case. In the case of Buchla and Moog, they were modular synthesizers: a bunch of modules that the musician, the user, could plug together with patch cords in any way he wanted. They were analog modules, and I made the digital equivalent of most of those, or they made the analog equivalents of mine: the oscillator, of course, and the attack-and-decay generators and the filters and the mixers and things like that. The computer had at least the initial advantage that the computer memory could also contain the score of the music, and in the early Moog things it was harder to put the score into an analog device. They did gradually introduce what they called sequencers, which are a form of score, but it never became as general as what you could do with a digital computer, and it never became as general as what you can do with MIDI files.

And do you know what the difference is between a MIDI file and MIDI commands? Well, a MIDI command has no execution time attached to it per se. It's just a command that lets you turn on a note in some synthesizer from some other keyboard that sends a standard command, the MIDI command, to the synthesizer. And this was an enormous advance for analog equipment, or combination digital-analog equipment, because the MIDI file itself is digital. It was an enormous communication standard, very reluctantly entered into by the big companies. Yamaha, I don't think, was at the beginning of this. It was [[Dave Smith]] that... I've forgotten the name of his company.

Interviewer 41:14

Sequential Circuits?

Max V. Mathews 41:14

Sequential Circuits, and Roland, and one other company, were the initiators of the MIDI commands. Then people figured out that if you put a sequence of these commands into a computer that would play them one after the other, and if you put a time code in that said when to play them, or really the delta time, how long to wait between playing one command and playing the next command, then you could encode a complete piece of music as a MIDI file. So this was another really great breakthrough that Smith and Roland and this other company made.
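A minimal sketch of that distinction, with invented event tuples rather than the actual Standard MIDI File byte format:

```python
import time

# A bare MIDI command carries no time of its own: it is executed the moment
# it arrives. A MIDI *file* stores (delta_time, command) pairs, so a whole
# piece can be replayed later. These tuples are a simplified stand-in.

score = [
    (0.0, ("note_on", 60)),   # play middle C immediately
    (0.5, ("note_off", 60)),  # half a second later, release it
    (0.0, ("note_on", 64)),   # at the same instant, start E
    (1.0, ("note_off", 64)),  # a second after that, release E
]

def play(events, send):
    """Wait out each event's delta time, then hand the command to `send`."""
    for delta, command in events:
        time.sleep(delta)   # delta time: the gap since the previous command
        send(command)

play(score, print)
```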
Interviewer 42:06

Yeah, absolutely. What role, if any, did musique concrète play in the evolution of all of this?

Max V. Mathews 42:16

Um... Oh, musique concrète started before all this came along, and the technology used was tape recorder technology: changing the speed of tapes, making tape loops, which play something repetitiously, and being able to splice snippets of tape with various sounds on them. So you could make a composition, for example, by splicing tapes of various pitches, and that was a very successful and a very tedious operation. One of the things that I tried to do was to make the computer do the tedious part of it, which it does very well, and let the composer think more about the expressive part. Now, people argue a lot about musique concrète versus Stockhausen's alternative, where he generated all the sounds not by recording real sources but by using oscillators, I think. I've forgotten the name for that. But anyway, that now, I think, is an absolutely meaningless argument, because digitized sound is so universal that the sources of the sound can either come from nature, from recordings of instruments, sampled things, or they can be synthesized, and you can use FM techniques, or additive synthesis, or a myriad of other ways of making your sound. So I don't really think it's worth rehashing this very old conflict, and I guess [[Pierre Schaffer]] died a number of years ago.

Interviewer 44:50

Yeah.

Max V. Mathews 44:51

Stockhausen is still around. Chowning's FM synthesis really started out as a purely synthesized sound, with no recording of natural sounds being involved. But now most synthesizers use samples. They process these samples in various ways, including FM ways, to get the timbre that the person wants.

Interviewer 45:21

And did you know John before...

Max V. Mathews 45:26

John was studying as a grad student at Stanford, and he, and Risset too, read a paper I wrote in Science magazine about the Music 3 program, and he came to Bell Labs and spent a day with me. He was very bright, and he understood what I was doing instantly, and he went back to Stanford and wrote his own music program. Then he tied up with the artificial intelligence laboratory that [[John McCarthy]] had set up at Stanford, and they had a very good computer, a DEC PDP-10, which in my mind was by far the best computer that existed in those days. So at night, when the AI people were home sleeping, John could use the computer for making music with these programs, and he made wonderful music.

Well, one of the things that Risset found was that in order to be interesting, the spectrum of a sound has to change over the duration of a note. If the spectrum is constant over the note, why, your ear very rapidly gets tired of the sound and doesn't think it's beautiful or charming. So Risset used additive synthesis, with a lot of oscillators, changing the amplitudes of their outputs to make a changeable spectrum, and he could make very good instrumental sounds and other sounds this way, but it was very expensive. And John found that by using frequency modulation, in a way it had never been used for communication purposes, he could also make the spectrum change over the note and do similar things to what Risset did with additive synthesis, and this was much more efficient; it took less computer power.

John was also a very good salesman. He persuaded the Yamaha company to design a chip to do FM synthesis, and this became the Yamaha DX7 synthesizer, and sort of overnight that brought down the price of an entry-level system that could make interesting music from a [[PDP-11]] computer costing about $100,000 to a DX7 costing about $2,000. And of course that increased the number of people who were using this, I don't know, maybe by a ratio of a thousand to one, from the decrease in the cost. So anyway, as I say, John visited me in the early 60s, and then he went back and did his thing at Stanford. And Risset spent several years at Bell Labs in the 60s, and then he went back to France and gradually got a digital system going there, and persuaded [[Pierre Boulez]], or maybe Boulez persuaded himself, that there should be a computer part of the IRCAM laboratory that Boulez had talked Pompidou into supporting in France, and Risset was put in charge of that laboratory. Risset persuaded me and Boulez that I should spend some time there, and I continued to work at Bell Labs while helping set up IRCAM.
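A minimal sketch of the two-oscillator FM idea, with a decaying modulation index so the spectrum changes over the note; the parameter values are illustrative, not Chowning's:

```python
import math

SAMPLE_RATE = 44100

def fm_note(carrier_hz, ratio, peak_index, seconds):
    """Simple two-oscillator FM: one sine modulates the phase of another.
    The modulation index decays over the note, so the spectrum starts bright
    and thins out, giving the time-varying spectrum described above."""
    n = int(seconds * SAMPLE_RATE)
    mod_hz = carrier_hz * ratio        # modulator frequency, set by the ratio
    out = []
    for i in range(n):
        t = i / SAMPLE_RATE
        index = peak_index * (1.0 - i / n)              # decaying index envelope
        mod = index * math.sin(2 * math.pi * mod_hz * t)
        out.append(math.sin(2 * math.pi * carrier_hz * t + mod))
    return out

# one second of a 440 Hz note, 1:1 carrier/modulator ratio, peak index 5
samples = fm_note(440.0, 1.0, 5.0, 1.0)
```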
Interviewer 49:41

Hm.

Max V. Mathews 49:41

I was the first scientific director there. It was a very interesting job.

Interviewer 49:52

What sort of things made it so interesting for you there?

Max V. Mathews 49:56

Oh, the excitement of working in Paris, trying to learn how to speak a little French. Getting a system going with a PDP-10 computer, which the French had enough money to buy, and getting the analog-to-digital and digital-to-analog parts onto it and using them. They had some very good studio rooms, so you could do good psychoacoustic research; you need a nice quiet room to listen to things in, and IRCAM had that. The rooms were connected to the computer, so you could make good test sounds to evaluate. Working with Risset and Gerald Bennett, who I still work with very much; [[David Wessel]], of course, came over there, and was there for a decade or two. Working with the musicians there and the technical people. It was an exciting time in my life.

Interviewer 51:09

Going back to John for just a second. From your perspective, what was the importance of FM synthesis?

Max V. Mathews 51:21

Well, the importance was that you could make good music with it. That also led to the [[SAMSON BOX|Samson Box]], which could do real-time FM synthesis, as could the DX7, but more powerful synthesis. The Samson Box was designed and built here in the Bay Area, by Peter Samson. And for about a decade it had a monopoly on the rapid and efficient synthesis of really powerful music, a monopoly at John's CCRMA laboratory. Just an enormous string of very excellent music came out of that, and good musicians from all over were attracted to CCRMA because of that machine. Now, you could make this same music, but much more slowly, on a PDP-10 by itself; the Samson Box made a second of music in a second of time. That was real time. It was intended to be used for live performance of computer music. That was the intention, and it could have done that, but it really was never capitalized on, because, A, you had to have a PDP-10 to drive the Samson Box, and B, you had to have the Samson Box, which was about the size of a big refrigerator. So it really wasn't practical to take this onto a stage where you have to do a performance. And so it produced essentially tape music, but rich tape music. The lifetime of the Samson Box was really ended by the advent of laptop computers, and by laptop computers getting so powerful that they now can do what the Samson Box did, ten times faster than the Samson Box. Either the Macintosh or the PC that I have can do that. They, of course, surpassed the PDP-10, so the power of the computers that you can carry around in your briefcase is greater than musicians know how to utilize. The musical world is no longer limited by the technology and what it can do. Instead, it's very much limited by our understanding of the human ear and the human brain, and of what people want to hear as music, what excites them, what makes them think it's beautiful. And that's the continuing forefront of research and future development for music entirely.

Interviewer 55:00

What exactly is an oscillator, and were the oscillators that Theremin used the same oscillators that were used in the early days of Bell Labs? Can you talk a little bit about that?

Max V. Mathews 55:16

Yeah, they were the same oscillators. They were based on the vacuum tube, the triode, which de Forest, and maybe others, invented. And that made it possible to make radios and do things. Theremin's work came along very shortly after the vacuum tube came along, and long-distance telephony essentially had to use vacuum tubes.

Interviewer 55:48

What made Theremin's use of the oscillator so unique, do you think?

Max V. Mathews 55:55

Oh, he found that if you had a somewhat unstable oscillator, you could influence the pitch of the oscillator by moving your hand in the electric field produced by the oscillator and an antenna attached to the oscillator. And so this was a way of controlling the pitch. And he also used the same technique for controlling the loudness of the sound. So that was his real contribution.

Interviewer 56:34

Did you ever have a chance to meet him?

Max V. Mathews 56:35

Oh yeah, he came over with one of his daughters, I think, to Stanford and gave a lecture and a concert. I played with the daughter. She played the theremin in Rachmaninoff's Vocalise: she did the vocal part, which the theremin is good for, and I did the orchestral accompaniment on one of my instruments, the radio baton.

Interviewer 57:13

Very interesting. What sort of guy did you find him to be?

Max V. Mathews 57:18

Oh, he, at the age of 90, could out-drink and out-stay me in the evening, and I stayed around until midnight, and then I went home and collapsed. Yeah, I think he was a universal man, a citizen of the world.

Interviewer 57:37

You mentioned a music baton, which is something I wanted to just briefly talk about. You had several instruments that you really helped design. Was that the first?

Max V. Mathews 57:47

Well, Music 1 was the first, and then I got interested in real-time performance. The radio baton and the conductor program were intended as a live performance instrument. The conductor program supplied the performer with a virtual orchestra, and the performer was a conductor, not an instrument player, or at least a simulated conductor. He would beat time with a baton in one hand, as a conductor does, and the conductor program would follow his beat; he could speed up or slow down. Then he would use the other hand to provide expression to the music, the loudness or the timbre. Both of these batons could be moved in three-dimensional space and could send XYZ information to the computer. That's where the radio part came in, to track the batons.

Interviewer 58:55

Interesting. How many of those were made?

Max V. Mathews 58:59

Oh, about 50 of them, and they're still being made.

Interviewer 59:05

Is there any part of that that you wish you could have added a feature or something to that didn't get worked in right away?

Max V. Mathews 59:19

I'm still adding features to them. Originally it was a mechanical drum that you had to actually hit, but it would sense where you hit it. Then it became a radio device; the radio technology was designed by a friend from Bell Labs named Bob Boie, who is retired and lives in Vermont now. Anyway, this meant you didn't have to touch anything. You could wave these things in three-dimensional space, and that was a great freedom. Originally, you had to have wires attached to the batons to power the little transmitters in their ends. The latest model is wireless; [[Tom Oberheim]] helped me design and build the batons. We still work together, and I went to breakfast with him before I came here. He and I together made the wireless version of it, the cordless radio baton. So that is my main live performance instrument, and I love live performance. Performing music and playing with other people is one of the real joys of life. Chamber music is wonderful.

Interviewer 01:00:56

Well said. And just because I don't want to slight the enormous contribution you made at Stanford, I wanted to acknowledge that and ask you: was that a good run for you?

Max V. Mathews 01:01:11

I still go down there a couple of days a week, even though I officially retired last September. But yes, I've enjoyed working with John, for example, and [[Bill Schottstadt]], and many of the other people at CCRMA. It's a great group. Again, a very free group, where people aren't told what to do; they have to figure out what they want to do.
8
content/Museum of Solar Energy.md
Normal file
@ -0,0 +1,8 @@
---
date: 2025-05-13 14:55
---
#solar

[Museum Of Solar Energy | Solar Cell Archive](https://solarmuseum.org/)

Apparently the physical exhibits are a traveling exhibition
8
content/No Tech Magazine.md
Normal file
@ -0,0 +1,8 @@
---
date: 2025-05-12 17:29
---
#website

The blog of [[Kris De Decker]], who created [[Low Tech Magazine]]

[NO TECH MAGAZINE – Technology for Luddites](https://www.notechmagazine.com/)
4
content/Pierre Schaffer.md
Normal file
@ -0,0 +1,4 @@
---
date: 2025-01-20 18:08
---
#stub
@ -0,0 +1,73 @@
---
cssclass: research-note
type: "journalArticle"
author: "Adkins, Monty; Scott, Richard; Tremblay, Pierre Alexandre"
title: "Post-Acousmatic Practice: Re-evaluating Schaeffer’s heritage"
publication: "Organised Sound"
date: 2016-08-01
citekey: adkins2016
tags:
- research
- journalArticle
---

> [!Cite]
> Adkins, Monty, Richard Scott, and Pierre Alexandre Tremblay. 2016. “Post-Acousmatic Practice: Re-Evaluating Schaeffer’s Heritage”. _Organised Sound_ 21 (2): 106–16. [https://doi.org/10.1017/S1355771816000030](https://doi.org/10.1017/S1355771816000030).

>[!Synth]
>**Contribution**::
>
>**Related**::
>

[online](http://zotero.org/users/12014264/items/J2E4LKTB) [local](zotero://select/library/items/J2E4LKTB) [pdf](file:///Users/tomoya/Zotero/storage/9MXHRA65/Adkins%20et%20al.%20-%202016%20-%20Post-Acousmatic%20Practice%20Re-evaluating%20Schaeffer’.pdf)

>[!md]
> **FirstAuthor**:: Adkins, Monty
> **Author**:: Scott, Richard
> **Author**:: Tremblay, Pierre Alexandre
>
> **Title**:: Post-Acousmatic Practice: Re-evaluating Schaeffer’s heritage
> **Year**:: 2016
> **Citekey**:: adkins2016
> **itemType**:: journalArticle
> **Journal**:: *Organised Sound*
> **Volume**:: 21
> **Issue**:: 2
> **Pages**:: 106-116
> **DOI**:: 10.1017/S1355771816000030

> [!LINK]
>
> [Adkins et al. - 2016 - Post-Acousmatic Practice Re-evaluating Schaeffer’.pdf](file:///Users/tomoya/Zotero/storage/9MXHRA65/Adkins%20et%20al.%20-%202016%20-%20Post-Acousmatic%20Practice%20Re-evaluating%20Schaeffer’.pdf).

> [!Abstract]
>
> This article posits the notion of the post-acousmatic. It considers the work of contemporary practitioners who are indebted to the Schaefferian heritage, but pursue alternative trajectories from the established canonical discourse of acousmatic music. It will outline the authors’ definition of the term and also outline a network of elements such as time, rhythm, pitch, dynamics, noise and performance to discuss work that the authors’ consider to be a critique, an augmentation and an outgrowth of acousmatic music and thinking.
>.
>

# Notes

A paper laying out the notion of the [[ポストアクースマティック]] (post-acousmatic). The discussion ends up diffuse, concluding more or less that a unified definition is impossible, so I don't quite see what the banner of the post-acousmatic is actually good for...

> The rich possibilities of acousmatic performance today rest in part on changes in technology. The technological tools which were once hidden away under lock and key in the institutional Computer Music Studio are now ubiquitous, and they are portable. Any desktop or laptop computer, and increasingly phones, tablets and other devices, can now be used to make acousmatic music or any other kinds of electronic music.

This feels pretty disingenuous (the far side of the now-portable computer has simply become a black box)

> What initially started as different genres of music utilising the same tools for different ends has resulted in a gradual osmotic transference over the past decades in studio-based genres of influences, tools, methods and writing techniques that have facilitated a more sophisticated dialogue between each of them rather than a shallow reciprocal plundering.

Same here

> If, as Demers writes, we define ourselves by the art we consume, then to an extent we are also defined by the manner in which we choose to consume it.

Fair enough that they are self-aware on this point

# Annotations%% begin annotations %%

%% end annotations %%

%% Import Date: 2025-01-21T18:33:52.064+09:00 %%
@ -34,5 +34,10 @@ comment::
%% begin annotations %% %% end annotations %%

Excimer lasers: 193 nm (ArF) and 248 nm (KrF)

Fiber lasers are around 1000 nm; CO2 lasers are on the order of 10000 nm

Low temperature, but a vacuum is still required

%% Import Date: 2023-08-10T15:39:03.591+09:00 %%
10
content/Queer Use.md
Normal file
@ -0,0 +1,10 @@
---
date: 2025-05-15 13:53
---
#notion

[[Sara Ahmed]]

Names the [[誤用]] (misuse), appropriation, and exploitation of technology "queer use", in the manner of queer reading

[Queer Use | feministkilljoys](https://feministkilljoys.com/2018/11/08/queer-use/)
8
content/RCGSアーカイブ.md
Normal file
@ -0,0 +1,8 @@
---
date: 2025-03-08 14:57
---
#game #research

Ritsumeikan University's collection of game-related materials

[トップ | RCGSコレクション](https://collection.rcgs.jp/)
8
content/SGMK.md
Normal file
@ -0,0 +1,8 @@
---
date: 2025-02-18 20:27
---
#institution #stub


[Hacker-in-Residence Programme – SGMK – Schweizerische Gesellschaft für Mechatronische Kunst](https://mechatronicart.ch/hacker-in-residence-programme/#:~:text=The%20Hacker%2Din%2DResidence%20Programme,and%20board%20member%20of%20both)
4
content/Sara Ahmed.md
Normal file
@ -0,0 +1,4 @@
---
date: 2025-05-15 13:54
---
#person
16
content/Solarpunk.md
Normal file
@ -0,0 +1,16 @@
---
date: 2025-02-27 13:01
---
#solar

An SF genre, alongside [[サイバーパンク]] (cyberpunk) and [[スチームパンク]] (steampunk), built around a world powered by solar energy

[A Solarpunk Manifesto (Japanese) – ReDes – Regenerative Design](http://www.re-des.org/es/a-solarpunk-manifesto-japanese/)

- Countercultural in its thinking, but tends toward utopia rather than dystopia
- Aims at being sustainable, like [[PermaComputing]], rather than post-collapse (after-the-fall) technology such as [[DuskOS]]

[About – Solarpunk Magazine](https://solarpunkmagazine.com/about/)
18
content/Somax2.md
Normal file
@ -0,0 +1,18 @@
---
date: 2025-06-09 09:03
---
#computermusic

[Somax2 | STMS Lab](https://www.stms-lab.fr/projects/pages/somax2/)
[somax2 \[Music Representations Team\]](http://repmus.ircam.fr/somax2)

Successor to a system called Voyager

Real-Time Instrumental Playing Technique (IPT) Recognition

Machine-learning based

Build a dataset of various techniques (flutter tonguing, etc.) played for each pitch

GFDataBase
@ -1,7 +1,7 @@
---
date: "2024-02-06T02:00:06+0900"
---
#programming-language #software #tools
#programming-language #software #tools #sound

A music programming language developed by James McCartney and later open-sourced.

@ -0,0 +1,67 @@
---
cssclass: research-note
type: "book"
author: "Roads, Curtis"
title: "The Computer Music Tutorial, second edition"
date: 2023-06-06
citekey: roads2023
tags:
- research
- book
---
[[Curtis Roads]]

> [!Cite]
> Roads, Curtis. 2023. _The Computer Music Tutorial, second edition_. 2nd ed. Cambridge, Massachusetts London: The MIT Press.

>[!Synth]
>**Contribution**::
>
>**Related**::
>

[online](http://zotero.org/users/12014264/items/HS4VS3U3) [local](zotero://select/library/items/HS4VS3U3)

>[!md]
> **FirstAuthor**:: Roads, Curtis
>
> **Title**:: The Computer Music Tutorial, second edition
> **Year**:: 2023
> **Citekey**:: roads2023
> **itemType**:: book
> **Publisher**:: The MIT Press
> **Location**:: Cambridge, Massachusetts London
> **ISBN**:: 978-0-262-04491-2

> [!LINK]
>.

> [!Abstract]
>
> Expanded, updated, and fully revised—the definitive introduction to electronic music is ready for new generations of students.Essential and state-of-the-art, The Computer Music Tutorial, second edition is a singular text that introduces computer and electronic music, explains its motivations, and puts topics into context. Curtis Roads’s step-by-step presentation orients musicians, engineers, scientists, and anyone else new to computer and electronic music.The new edition continues to be the definitive tutorial on all aspects of computer music, including digital audio, signal processing, musical input devices, performance software, editing systems, algorithmic composition, MIDI, and psychoacoustics, but the second edition also reflects the enormous growth of the field since the book’s original publication in 1996. New chapters cover up-to-date topics like virtual analog, pulsar synthesis, concatenative synthesis, spectrum analysis by atomic decomposition, Open Sound Control, spectrum editors, and instrument and patch editors. Exhaustively referenced and cross-referenced, the second edition adds hundreds of new figures and references to the original charts, diagrams, screen images, and photographs in order to explain basic concepts and terms.FeaturesNew chapters: virtual analog, pulsar synthesis, concatenative synthesis, spectrum analysis by atomic decomposition, Open Sound Control, spectrum editors, instrument and patch editors, and an appendix on machine learningTwo thousand references support the book’s descriptions and point readers to further studyMathematical notation and program code examples used only when necessaryTwenty-five years of classroom, seminar, and workshop use inform the pace and level of the material
>.
>

# Notes

Differences from the 1st edition: a subtle difference in how [[MUSIC N]]'s MUSIC I/II are described (noticed at the citation in [[Computer Music Languages and Systems - The Synergy Between Technology and Creativity |Nishino&Nakatsu 2016]])

1st edition, p. 87:

> A patient user could specify notes only in terms of pitch, waveform, and duration([[Interview with Max Mathews - Curtis Roads, Max Mathews| Roads 1980]]).

2nd edition, p. 61:

> A user could specify notes only in terms of pitch and duration(Roads 1980)

So MUSIC I really didn't have waveform specification, right? And it didn't have text-format input on the user side either, did it?

# Annotations%% begin annotations %%

%% end annotations %%

%% Import Date: 2025-01-20T16:44:42.474+09:00 %%
16
content/The Invisible Computer - Donald Norman.md
Normal file
@ -0,0 +1,16 @@
---
date: 2025-01-24 10:29
---
#book

[[Donald Norman]] (1999)

[The Invisible Computer : Donald A. Norman : Free Download, Borrow, and Streaming : Internet Archive](https://archive.org/details/invisiblecompute00norm_0)

In addition to the influence of [[Mark Weiser]]'s [[ユビキタス・コンピューティング]] (ubiquitous computing), it also discusses the importance of [[インフラストラクチャ]] (infrastructure)

> Infrastructure is usually thought to be dull. Tedious. Few people wish to think about it until it is necessary, which is then often too late. Once established, it is expensive and often difficult to change.(p132)

Mentions [[MIDI]] as a standard that worked out well

> Perhaps the best example of an effective, well-accepted family of information appliances is that of electric musical instruments, such as electric guitars, keyboards, drum machines, and synthesizers, along with the wide variety of sound generation and processing algorithms that supplement the musical devices(p.57)
6
content/Thor Magunusson.md
Normal file
@ -0,0 +1,6 @@
---
date: 2025-01-29 14:35
---
#person

[[Epistemic Tools]]
@ -0,0 +1,17 @@
---
title: "Toward Appropriating Tools for Queer Use | Proceedings of the Halfway to the Future Symposium"
url: "https://dl.acm.org/doi/10.1145/3686169.3686186"
date: "2025-05-15 13:51:39"
---
#queercomputing #hci

# Toward Appropriating Tools for Queer Use

[[Li Jingyi]]

https://dl.acm.org/doi/10.1145/3686169.3686186

> Article No.: 24, Pages 1 - 4

How far can [[Sara Ahmed]]'s concept of [[Queer Use]] be applied in HCI?
4
content/Unity.md
Normal file
@ -0,0 +1,4 @@
---
date: 2025-06-09 09:34
---
#stub
BIN
content/img/HALO.png
Normal file
After Width: | Height: | Size: 4.1 MiB |
BIN
content/img/Pasted image 20250206174905.png
Normal file
After Width: | Height: | Size: 252 KiB |
BIN
content/img/another_moon.png
Normal file
After Width: | Height: | Size: 4.1 MiB |
BIN
content/img/light-barrier.png
Normal file
After Width: | Height: | Size: 1.5 MiB |
BIN
content/img/teamlab-instagram-ss.png
Normal file
After Width: | Height: | Size: 727 KiB |
BIN
content/img/teamlab-light-sculptures.png
Normal file
After Width: | Height: | Size: 2.4 MiB |
BIN
content/img/スクリーンショット 2025-02-07 14.32.12.png
Normal file
After Width: | Height: | Size: 126 KiB |
BIN
content/img/スクリーンショット 2025-04-06 162040.png
Normal file
After Width: | Height: | Size: 280 KiB |
BIN
content/img/スクリーンショット 2025-04-06 162931.png
Normal file
After Width: | Height: | Size: 397 KiB |
BIN
content/img/スクリーンショット 2025-04-06 163225.png
Normal file
After Width: | Height: | Size: 324 KiB |
BIN
content/img/スクリーンショット 2025-04-06 200807.png
Normal file
After Width: | Height: | Size: 39 KiB |
BIN
content/img/スクリーンショット 2025-04-06 200826.png
Normal file
After Width: | Height: | Size: 44 KiB |
BIN
content/img/スクリーンショット 2025-04-06 200834.png
Normal file
After Width: | Height: | Size: 28 KiB |
BIN
content/img/スクリーンショット 2025-04-06 201241.png
Normal file
After Width: | Height: | Size: 605 KiB |
@ -16,6 +16,7 @@ https://matsuuratomoya.com
- [[mimium]]
- [[プログラミングの良いチュートリアル]]
- [[電子管楽器]]
- [[DIY可能なトランペット]]
- [[DIY半導体]]
- [[オルタナティブ電子基板]]
- [[自宅サーバー]]
@ -43,6 +44,8 @@ https://matsuuratomoya.com
[[余暇を使って職業配信者がバイトゲームをするのを鑑賞する]]

[[論文を二言語並行して書く]]

### [Tag list](/tags)

Tags I'm likely to use often
14
content/ploopy.md
Normal file
@ -0,0 +1,14 @@
---
date: 2025-05-13 11:21
---
#diy #opensource #hardware

A maker of open-source mice, trackballs, trackpads, headphones, and so on

[Ploopy – Open-source hardware projects that don't suck.](https://ploopy.co/)

The 3D models, PCBs, firmware, and everything else are published

[ploopyco · GitHub](https://github.com/ploopyco)

The mice basically support QMK
6
content/web0.md
Normal file
@ -0,0 +1,6 @@
---
date: 2025-03-08 13:28
---
#stub #lowtech

[web0 manifesto](https://web0.small-web.org)
@ -1,4 +0,0 @@
---
date: 2025-01-15 16:10
---
#stub #book
6
content/お母さんは忙しくなるばかり 家事労働とテクノロジーの社会史-ルース・シュウォーツ・コーワン.md
Normal file
@ -0,0 +1,6 @@
---
date: 2025-01-15 16:10
---
#book

> If a brick fell out of an eighteenth-century hearth, someone in the family would have known how to mix mortar and set the brick back in place. By comparison, if the heating element of a twentieth-century electric oven comes loose, it is unlikely that anyone in the family would know what to do, or would have the appropriate repair parts on hand. In this sense, the person who does the housework is alienated from the tools they use, just as the labor of those who work on factory assembly lines or at blast furnaces is alienated. (p5)
10
content/なぜコンピューター音楽はクソッタレなのか.md
Normal file
@ -0,0 +1,10 @@
---
date: 2025-01-18 00:44
---
#scrap #computermusic

"Why Computer Music Sucks - [[Bob Ostertag]]" (1998)

https://web.archive.org/web/20160312125123/http://bobostertag.com/writings-articles-computer-music-sucks.htm

> In fact, having listened to the 287 works submitted to Ars Electronica, I would venture that the works made with today's state-of-the-art technology (spectral resynthesis, sophisticated phase-vocoding schemes, and so on) show even greater sonic uniformity than the works made with the MIDI modules available in music stores serving the popular music market. This fact was thrown into relief when the works the jury singled out for their exceptional timbral novelty turned out, for the most part, to have been made on old Buchla analog equipment. (DeepL translation)
14
content/アクースマティック.md
Normal file
@ -0,0 +1,14 @@
---
date: 2025-01-20 12:25
---
#notion #sound

A term, born after the advent of recording technology, for listening to sound without assuming a specific source.

Coined by [[Pierre Schaffer]], who proposed [[ミュージック・コンクレート]] (musique concrète), i.e. tape music.

[[Jonathan Sterne]] criticizes it in [[聞こえくる過去─音響再生産の文化的起源]]

> For instance, the claim that sound reproduction has “alienated” the voice from the human body implies that the voice and the body existed in some prior holistic, unalienated, and self present relation.
> They assume that, at some time prior to the invention of sound reproduction technologies, the body was whole, undamaged, and phenomenologically coherent. (p20)
4
content/インフラストラクチャ.md
Normal file
@ -0,0 +1,4 @@
---
date: 2025-02-04 11:50
---
#stub
6
content/インフラ研究.md
Normal file
@ -0,0 +1,6 @@
---
date: 2025-02-04 11:50
---
#research

[[Critical Infrastructure Studies]]
@ -1,10 +1,14 @@
---
date: "2024-02-06T01:46:14+0900"
---
#scrap #memo

(from my old Workflowy, around 2020)

[[逆卷しとね]]

[[インフラ研究]]

[ウゾウムゾウのためのインフラ論 | webあかし](https://webmedia.akashi.co.jp/categories/786)

- [https://webmedia.akashi.co.jp/categories/786](https://webmedia.akashi.co.jp/categories/786)
@ -24,7 +28,7 @@ date: "2024-02-06T01:46:14+0900"
- Calm technology and smart cities
- Ambient Computing fits here too
- "What drives the sharing economy is the economic activity of companies. For example, in 'Airbnb', or in 'services that reuse second-hand goods for free (e.g. U-Exchange), ones where car owners rent out their cars only during the hours they don't use them (e.g. Zipcar), or likewise so-called ride sharing, where car owners give rides to people nearby (e.g. Uber)', there is a service relationship (note 39) but no sharing of sociality. There are services around skills, money, mobility, things, and space, but the sociality of mutual dependence and of the labor that mediates it, and the community that ought to rest on them, are nowhere to be found. The desire to share something and connect is widespread, centered on social media. But that shared desire is conveniently exploited by a sharing economy situated in the context of the productivity revolution, and the desire never reaches what should be its object (society)."
- Close to Platform Capitalism too
- Close to [[Platform Capitalism]] too
- "Residents of a smart city live receiving only the input and output information, without knowing the mechanism. Press the call button and the call goes through, but you don't need to know how the call is established. Even though the algorithms mediating between input and output, as seen in [Image 16], remain a black box to humans, the smart life is realized anyway. Like magic. Under the totalitarianism of the total optimization that computers bring, a 'total optimum that surpasses modern human ethics' (note 45) is obtained. The media artist Yoichi Ochiai calls the new technological, religious, magical nature born from humans, things, and nature becoming one 'computational nature', or digital nature (note 46). But if Washburn's point is right, in the smart city's world of optimization we will eventually be exposed to 'the techno-natural selection of digital nature', whose process of deriving solutions is unknown (note 47)."
- "What is not shared in the smart city vision is that which is not smart."
- 未来都市アトラス (Atlas of Future Cities)
4
content/グリッチ.md
Normal file
@ -0,0 +1,4 @@
---
date: 2025-01-22 00:42
---
#stub
65
content/コードとデザイン 人間CMOSワークショップ.md
Normal file
@ -0,0 +1,65 @@
---
date: 2025-04-25 01:22
---
#teaching

## Aim

Deepen understanding of logic circuits, the foundation of modern computers.

At the same time, to build understanding of how analog electronic circuits are constructed, we build logic gates out of MOSFETs, rather than settling for the schematic understanding of logic circuits found in the usual flag-raising-game style exercises.

The class is organized as two 90-minute periods. In the first half, students simulate the circuit's operation with their bodies, each person playing a single transistor; in the second half, we try to cement the understanding by actually soldering MOSFETs onto electric circuits made by sticking copper foil tape onto paper.

## Preamble

A modern computer, at the hardware level, ultimately takes in two kinds of values, high or low current, in various combinations, and returns the result as another set of binary values.

For example, even values that look continuous, such as integers and real numbers, can be represented as combinations of 0s and 1s, and their addition and subtraction can be realized by building a circuit that returns, for each combination of binary inputs, the corresponding combination of answer bits.

## Human CMOS basics

### Common rules

Height represents voltage.

A wire connected to the power supply is always held high.

A wire connected to GND is always held low.

All points that are electrically connected must be at the same height.

If points that should be conducting are at different heights, that means a short circuit: the supply and GND are directly connected and a large current suddenly flows, which is genuinely dangerous in a real circuit.

### Rules for the MOSFET players

The shoulder is the gate, i.e. the input. To wire to it, touch the shoulder. (It doesn't have to be direct contact.)

For both PMOS and NMOS, the right hand is the source and the left hand is the drain.

Once the right-hand source is wired to something, it takes on the same voltage as whatever it is connected to: it goes up when connected to the supply and down when connected to GND.

NMOS players: **if the gate (shoulder) voltage is higher than the source (right hand), set the drain (left hand) to the same voltage as the source (= switch on)**

PMOS players: **if the gate (shoulder) voltage is lower than the source (right hand), set the drain (left hand) to the same voltage as the source (= switch on)**

## Acting out a CMOS inverter

Roles per group:

- input and wiring player
- PMOS player
- NMOS player
- output wiring player

If there are people left over, assign one or two as directors to balance the numbers.

"Build a circuit whose output is LOW when the input is HIGH, and HIGH when the input is LOW" (see the sketch below)
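A minimal Python simulation of the role-play rules above, as a sketch (1 = high/VDD, 0 = low/GND; the function names are mine, not part of the workshop):

```python
# Each transistor "switches on" exactly as the role-play rules say.

def nmos(gate, source):
    """NMOS: conducts (drain follows source) when the gate is higher than the source."""
    return gate > source

def pmos(gate, source):
    """PMOS: conducts when the gate is lower than the source."""
    return gate < source

def cmos_inverter(inp):
    # PMOS source is tied to VDD (1), NMOS source to GND (0); both gates are
    # the input, both drains are the output.
    if pmos(inp, 1):   # PMOS on: output pulled up to VDD
        return 1
    if nmos(inp, 0):   # NMOS on: output pulled down to GND
        return 0
    raise ValueError("floating output")  # cannot happen for a valid 0/1 input

for x in (0, 1):
    print(x, "->", cmos_inverter(x))   # 0 -> 1, 1 -> 0
```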
## Building it on paper

Per person: one PMOS and one NMOS, plus an LED and a resistor

With 20 people: 20 NMOS, 20 PMOS

If we also build a NAND: 40 NMOS and 40 PMOS are needed
8
content/サワードウ.md
Normal file
@ -0,0 +1,8 @@
#sourdough

[How to capture wild yeast for bread (and WHY it works) - YouTube](https://www.youtube.com/watch?v=F2BZB7Nf_P8)

It has become a research project at NC State University

[Wild Sourdough – Public Science Lab](https://robdunnlab.com/projects/wildsourdough/)
@ -14,3 +14,7 @@ date: "2023-08-09T17:27:54+0900"
As a DIY build, there is [[Maasi - A 3D printed spin coater with touchscreen|Maasi]], an open-source spin coater that uses an ESC for drones

(I built one, but the rotation isn't stable yet, so it's on the shelf)

Even without measuring the rotation speed precisely with a drone ESC or a pulse-detection sensor, it seems reproducibility can be maintained as long as the fan's maximum RPM at the specified voltage and the PWM pulse width are fixed.

Using the fan as-is raises concerns about splatter, so it might be enough to make a case that lets the fan be removed easily
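A quick sketch of that open-loop reproducibility idea (the maximum RPM and the duty values are made-up numbers):

```python
# Toy estimate: if the fan's top speed at the chosen supply voltage is known,
# the spin speed should follow the PWM duty cycle roughly linearly, with no
# tachometer needed. MAX_RPM and the duty values are illustrative only.

MAX_RPM = 3000.0  # fan's top speed at the specified voltage (assumed)

def expected_rpm(duty):
    """Open-loop estimate: duty cycle times top speed."""
    return MAX_RPM * duty

for duty in (0.25, 0.5, 0.75, 1.0):
    print(f"duty {duty:.2f} -> ~{expected_rpm(duty):.0f} rpm")
```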
6
content/ソフトウェアなど存在しない.md
Normal file
@ -0,0 +1,6 @@
---
date: 2025-01-29 14:35
---
#book

[[Friedrich Kittler]]