Analyzing Sound Data

I want to make a sound visualization of a Beethoven music file, and I also want to use the user's gestures to conduct the file's sound. I will train a model in Wekinator to recognize the user's gestures and change the sound's characteristics, so the shapes should change according to the trained data coming from Wekinator. But I cannot run the code. The error I get is "The global variable "left" does not exist" at these two lines:

float[] leftChannel = beethoven.left;
float[] rightChannel = beethoven.right;
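
The error appears to come up because SoundFile from the processing.sound library has no left or right fields; those per-channel sample buffers belong to AudioPlayer in the Minim library. If raw left/right sample data is really what is wanted, one option is to switch that part to Minim. This is only a minimal sketch of that route, assuming Minim is installed through the Contribution Manager and beethoven.mp3 sits in the sketch's data folder:

import ddf.minim.*;

Minim minim;
AudioPlayer player;

void setup() {
  size(700, 850);
  minim = new Minim(this);
  player = minim.loadFile("beethoven.mp3");  // same file the sketch below uses
  player.play();
}

void draw() {
  background(0);
  stroke(255);
  // player.left and player.right hold the most recently played buffer of samples
  for (int i = 0; i < player.bufferSize() - 1; i++) {
    float x1 = map(i, 0, player.bufferSize(), 0, width);
    float x2 = map(i + 1, 0, player.bufferSize(), 0, width);
    line(x1, height / 2 + player.left.get(i) * 100,
         x2, height / 2 + player.left.get(i + 1) * 100);
  }
}

Staying entirely on processing.sound is also possible; see the analyzer sketch after the full code. Here is the full sketch as it currently stands:
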
import processing.sound.*;
import oscP5.*;
import netP5.*;

SoundFile beethoven;
OscP5 oscP5;
NetAddress dest;
float n4;
float n6;
float xValue = 0.0;
float yValue = 0.0;
float zValue = 0.0;
float[] wekinatorOutputs = new float[4];
float tempo1 = 1.0;
float pitch1 = 1.0;

boolean isSongPlaying = false;

void setup() {
  size(700, 850);
  noCursor();
  smooth();
  background(0);
  frameRate(24);

  beethoven = new SoundFile(this, "beethoven.mp3");
  oscP5 = new OscP5(this, 12000);            // listen on 12000 for Wekinator's output messages
  dest = new NetAddress("127.0.0.1", 6448);  // Wekinator listens for inputs on port 6448 by default
}

void draw() {
  fill(0, 50);
  noStroke();
  rect(0, 0, width, height);
  translate(width / 2, height / 2);

  if (beethoven != null && beethoven.isPlaying()) {
    analyzeSound();
  }
}

void analyzeSound() {
  float[] leftChannel = beethoven.left;
  float[] rightChannel = beethoven.right;
  int totalFrames = leftChannel.length; // Total frames in the sound file

  for (int i = 0; i < totalFrames - 1; i += 10) {
    float leftAmplitude = leftChannel[i];
    float rightAmplitude = rightChannel[i];

    // Use Wekinator output to modify the shape properties
    float angle = sin(i + n4) * 10 * (1 + wekinatorOutputs[0]); // Adjust angle based on Wekinator output
    float angle2 = sin(i + n6) * 300 * (1 + wekinatorOutputs[1]); // Adjust angle2 based on Wekinator output

    float x = sin(radians(i)) * (angle2 + 30);
    float y = cos(radians(i)) * (angle2 + 30);

    float x3 = sin(radians(i)) * (500 / angle);
    float y3 = cos(radians(i)) * (500 / angle);

    fill(#000000, 90); // Translucent black
    ellipse(x, y, leftAmplitude * 10, leftAmplitude * 10);

    fill(#ffffff, 60); // White
    rect(x3, y3, rightAmplitude * 20, rightAmplitude * 10);
  }

  n4 += 0.008;
  n6 += 0.04;
}

// Other functions (adjustSongProperties, sendToWekinator, oscEvent, mousePressed) remain the same

void adjustSongProperties() {
  sendToWekinator(xValue, yValue, zValue);

  // amp() sets playback volume and rate() sets playback speed, so output 0 drives
  // the loudness here and output 2 drives the tempo
  pitch1 = (abs(wekinatorOutputs[0]) + 0.1);
  tempo1 = (abs(wekinatorOutputs[2]) + 1) * 0.5;

  beethoven.rate(tempo1);
  beethoven.amp(pitch1);
}

void sendToWekinator(float x, float y, float z) {
  OscMessage msg = new OscMessage("/wek/in");
  msg.add(x);
  msg.add(y);
  msg.add(z);
  oscP5.send(msg, dest);
}

void oscEvent(OscMessage msg) {
  if (msg.checkAddrPattern("/wek/out") && msg.checkTypetag("ffff")) {
    wekinatorOutputs[0] = msg.get(0).floatValue();
    wekinatorOutputs[1] = msg.get(1).floatValue();
    wekinatorOutputs[2] = msg.get(2).floatValue();
    wekinatorOutputs[3] = msg.get(3).floatValue();
    adjustSongProperties();
  }
}

void mousePressed() {
  if (!isSongPlaying) {
    beethoven.play();
    isSongPlaying = true;
  }
}
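
If the sketch should keep using the processing.sound library it already imports, SoundFile does not expose per-channel sample arrays, but the library's analyzer classes can supply the numbers the drawing loop needs. Below is a minimal sketch of that idea, not a drop-in replacement: it assumes a Sound library version that ships the Amplitude and Waveform analyzers, and the window of 256 samples is an arbitrary choice.

import processing.sound.*;

SoundFile song;
Amplitude loudness;
Waveform waveform;

int samples = 256;  // arbitrary number of waveform values to grab each frame

void setup() {
  size(700, 850);
  song = new SoundFile(this, "beethoven.mp3");

  loudness = new Amplitude(this);          // overall RMS level of whatever is playing
  loudness.input(song);

  waveform = new Waveform(this, samples);  // snapshot of recent sample values, -1..1
  waveform.input(song);

  song.play();
}

void draw() {
  background(0);
  translate(width / 2, height / 2);

  float level = loudness.analyze();  // 0..1
  waveform.analyze();                // fills waveform.data with `samples` values

  stroke(255);
  noFill();
  beginShape();
  for (int i = 0; i < samples; i++) {
    float a = map(i, 0, samples, 0, TWO_PI);
    float r = 150 + waveform.data[i] * 150 * (1 + level);
    vertex(cos(a) * r, sin(a) * r);
  }
  endShape(CLOSE);
}

In analyzeSound(), level and waveform.data[i] could then stand in for leftAmplitude and rightAmplitude, with the Wekinator outputs scaling the angles and radii exactly as they do now.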
