Lautstärke
Code Block | ||||||
---|---|---|---|---|---|---|
| ||||||
//Spatial Interaction //ZHdK, Interaction Design //iad.zhdk.ch //Beispiel 01: Volume import ddf.minim.*; Minim minim; AudioInput in; float amountOfNoise = 0; void setup() { size(512, 200, P3D); minim = new Minim(this); //Open the audio input in = minim.getLineIn(); } void draw() { background(amountOfNoise); stroke(255); rect( 0, 0, in.left.level()*width, 100 ); rect( 0, 100, in.right.level()*width, 100 ); if(in.left.level() > 0.2 || in.right.level() > 0.2) { amountOfNoise++; } else if(amountOfNoise > 0) { amountOfNoise--; } text(amountOfNoise, 20, 20); } |
...
Code Block | ||||||
---|---|---|---|---|---|---|
| ||||||
//Spatial Interaction
//ZHdK, Interaction Design
//iad.zhdk.ch
//Example 03: FFT
import ddf.minim.analysis.*;
import ddf.minim.*;
Minim minim;
AudioInput in;
FFT fft;
int worldRecord = 0;        // index of the loudest band seen so far
float worldRecordLevel = 0; // magnitude that band had when it set the record
int threshold = 100;        // minimum magnitude before a band may set a record
void setup()
{
size(640, 480, P3D);
minim = new Minim(this);
//Open the audio input
in = minim.getLineIn();
//Create FFT object sized to the input buffer
fft = new FFT( in.bufferSize(), in.sampleRate() );
}
void draw()
{
background(0);
stroke(255);
//Analyse the mixed (left + right) input signal
fft.forward( in.mix );
for (int i = 0; i < fft.specSize(); i++)
{
// FIX: the original compared the band magnitude against the stored band
// *index* (worldRecord), which is meaningless; compare against the
// record *magnitude* instead and remember it when a new record is set.
if (fft.getBand(i) > threshold && fft.getBand(i) > worldRecordLevel)
{
worldRecord = i;
worldRecordLevel = fft.getBand(i);
}
line( i, height, i, height - fft.getBand(i)*8 );
}
//Mark the record-holding band and show its current magnitude
ellipse(worldRecord, 20, 20, 20);
text(fft.getBand(worldRecord), worldRecord+10, 20);
} |
Spracherkennung
Code Block | ||||||
---|---|---|---|---|---|---|
| ||||||
<!DOCTYPE HTML>
<html>
<head>
</head>
<body>
<script type="text/javascript">
// We need to check if the browser supports WebSockets.
// FIX: the original ran the speech-recognition setup and ws.send()
// unconditionally, so an unsupported browser got an alert and then a
// ReferenceError on the undefined "ws". All setup now lives inside the
// supported branch.
if ("WebSocket" in window) {
  // Before we can connect to the WebSocket, we need to start it in Processing.
  // Example using WebSocketP5
  // http://github.com/muthesius/WebSocketP5
  var ws = new WebSocket("ws://localhost:8080/p5websocket");

  // Now we can start the speech recognition
  // Supported only in Chrome
  // Once started, you need to allow Chrome to use the microphone
  var recognition = new webkitSpeechRecognition();

  // By default, Chrome will only return a single result.
  // By enabling "continuous", Chrome will keep the microphone active.
  recognition.continuous = true;

  recognition.onresult = function(event) {
    // Get the current result from the results object
    var transcript = event.results[event.results.length-1][0].transcript;
    // Only forward the result once the socket connection is open;
    // send() on a CONNECTING socket would throw.
    if (ws.readyState === WebSocket.OPEN) {
      ws.send(transcript);
    }
  };

  // Start the recognition
  recognition.start();
} else {
  // The browser doesn't support WebSocket
  alert("WebSocket NOT supported by your Browser!");
}
</script>
</body>
</html> |
...