...
Über die Lautstärke haben wir die einfachste Methode festzustellen, wie viel (klangliche) Aktivität in einer Umgebung herrscht. Die Werte, welche uns durch die Minim Library mit der Funktion obj.left.level() bzw. obj.right.level() zurückgegeben werden, sind dabei proportional zur Lautstärke und liegen im Bereich zwischen 0 und 1.
Image Added
Code Block |
---|
language | java |
---|
title | Beispiel Lautstärke |
---|
collapse | true |
---|
|
// Spatial Interaction
// ZHdK, Interaction Design
// iad.zhdk.ch
// Example 01: Volume
import ddf.minim.*;

Minim minim;
AudioInput in;
float amountOfNoise = 0;

void setup() {
  size(512, 200, P3D);
  minim = new Minim(this);
  // Open the default audio input (microphone / line-in).
  in = minim.getLineIn();
}

void draw() {
  // The background grey mirrors the accumulated activity counter;
  // Processing clamps values above 255 to white.
  background(amountOfNoise);
  stroke(255);

  // One bar per stereo channel; level() returns a value between 0 and 1.
  rect(0, 0, in.left.level() * width, 100);
  rect(0, 100, in.right.level() * width, 100);

  // Count up while either channel is louder than the threshold,
  // otherwise decay back towards zero.
  if (in.left.level() > 0.2 || in.right.level() > 0.2) {
    amountOfNoise++;
  } else if (amountOfNoise > 0) {
    amountOfNoise--;
  }
  text(amountOfNoise, 20, 20);
} |
Wellenform
Image Added
Code Block |
---|
language | java |
---|
title | Beispiel Wellenform |
---|
collapse | true |
---|
|
// Spatial Interaction
// ZHdK, Interaction Design
// iad.zhdk.ch
// Example 02: Waveform
import ddf.minim.*;

Minim minim;
AudioInput in;

void setup() {
  size(640, 480, P3D);
  minim = new Minim(this);
  // Open the default audio input.
  in = minim.getLineIn();
}

void draw() {
  background(0);
  // Draw one vertical line per buffered sample: the left channel in blue
  // around y = 200, the right channel in red around y = 280.
  // Samples are in the range -1..1, scaled by 50 pixels.
  for (int x = 0; x < in.bufferSize() - 1; x++) {
    stroke(0, 0, 255);
    line(x, 200, x, 200 - in.left.get(x + 1) * 50);
    stroke(255, 0, 0);
    line(x, 280, x, 280 + in.right.get(x + 1) * 50);
  }
} |
FFT
Image Added
Code Block |
---|
language | java |
---|
title | Beispiel FFT |
---|
collapse | true |
---|
|
// Spatial Interaction
// ZHdK, Interaction Design
// iad.zhdk.ch
// Example 03: FFT
import ddf.minim.analysis.*;
import ddf.minim.*;

Minim minim;
AudioInput in;
FFT fft;

int worldRecord = 0;    // index of the frequency band with the loudest peak so far
float recordLevel = 0;  // amplitude of that record peak
int threshold = 100;    // minimum amplitude before a peak counts as a record

void setup() {
  size(640, 480, P3D);
  minim = new Minim(this);
  // Open the audio input.
  in = minim.getLineIn();
  // Create the FFT object matching the input's buffer size and sample rate.
  fft = new FFT(in.bufferSize(), in.sampleRate());
}

void draw() {
  background(0);
  stroke(255);
  // Analyse the mixed (left + right) signal.
  fft.forward(in.mix);
  for (int i = 0; i < fft.specSize(); i++) {
    // BUGFIX: the original compared the band amplitude against the band
    // *index* (fft.getBand(i) > worldRecord). Track the record amplitude
    // in a separate variable so the comparison is amplitude vs. amplitude.
    if (fft.getBand(i) > threshold && fft.getBand(i) > recordLevel) {
      recordLevel = fft.getBand(i);
      worldRecord = i;
    }
    // Spectrum display: one vertical line per band.
    line(i, height, i, height - fft.getBand(i) * 8);
  }
  // Mark the record band and show its current amplitude.
  ellipse(worldRecord, 20, 20, 20);
  text(fft.getBand(worldRecord), worldRecord + 10, 20);
} |
Spracherkennung
Image Added
Code Block |
---|
language | xml |
---|
title | Speech Recognition |
---|
collapse | true |
---|
|
<!DOCTYPE HTML>
<html>
<head>
</head>
<body>
<script type="text/javascript">
// We need to check if the browser supports WebSockets.
if ("WebSocket" in window) {
    // Before we can connect to the WebSocket, we need to start it in Processing.
    // Example using WebSocketP5
    // http://github.com/muthesius/WebSocketP5
    var ws = new WebSocket("ws://localhost:8080/p5websocket");

    // Now we can start the speech recognition.
    // Supported only in Chrome.
    // Once started, you need to allow Chrome to use the microphone.
    var recognition = new webkitSpeechRecognition();

    // By default, Chrome will only return a single result.
    // By enabling "continuous", Chrome will keep the microphone active.
    recognition.continuous = true;

    recognition.onresult = function(event) {
        // Get the current result from the results object.
        var transcript = event.results[event.results.length - 1][0].transcript;
        // Send the result string via WebSocket to the running Processing Sketch.
        ws.send(transcript);
    };

    // Start the recognition.
    recognition.start();
} else {
    // The browser doesn't support WebSocket, so the transcript could never
    // reach the Processing sketch. BUGFIX: the original ran the recognition
    // code unconditionally after this alert, which then crashed on the
    // undefined "ws" inside onresult.
    alert("WebSocket NOT supported by your Browser!");
}
</script>
</body>
</html> |
...