Getting a TypeError when trying to run this Python script

I'm getting the error below. How can I fix it?

<ipython-input-7-3be77148eaa3> in create_training_data(data)
     39     # Loop through the data and retrieve relevant info
     40     for i in range(len(data['energy'])):
---> 41         X[i][0] = data['energy'][i]
     42         X[i][1] = data['chroma_stft'][i]
     43         X[i][2] = data['spectral_contrast'][i]

TypeError: 'numpy.float64' object does not support item assignment

I am trying to build a music generator but keep getting the error above. Can someone please have a look at the code and let me know what to update to make it work?
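
As far as I can tell, the error means that X inside create_training_data ends up as a 1-D array, so each X[i] is a single numpy.float64 and indexing it again with [0] fails. This small snippet reproduces the same message:

import numpy as np

X = np.zeros(5)   # 1-D array, so X[0] is a plain numpy.float64 scalar
X[0][0] = 1.0     # TypeError: 'numpy.float64' object does not support item assignment

What I can't figure out is how the array should be shaped for my case.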

The full code is provided below. I'm finishing a project with a close deadline, so any help would really be appreciated.

# importing necessary libraries 
import pandas as pd 
import numpy as np
import matplotlib.pyplot as plt
import sklearn
import librosa

# Gathering user's input
# Gather the tempo, rhythm and tone of the song
tempo = int(input("What tempo would you like the song to have? (BPM)"))
rhythm = input("What rhythm would you like the song to have? (e.g. 4/4) ")
tone = input("What is the 'feel' or tone of the song? (e.g. cheerful, melancholy)")

# Defining some Helper Functions 

# File_Retreival
# This function will retrieve our dataset with audio information 
def file_retreival(file): 
    # Using Librosa to import the audio file 
    audio, sample_rate = librosa.load(file)
    
    # Defining various results from the audio source 
    results = {
        "tempo": librosa.beat.tempo(audio, sample_rate), 
        "energy": librosa.feature.rms(audio), 
        "chroma_stft": librosa.feature.chroma_stft(audio), 
        "spectral_contrast": librosa.feature.spectral_contrast(audio), 
        "tonnetz": librosa.feature.tonnetz(y=librosa.effects.harmonic(audio))
    }
    return results

# Create Training Data
# This function will prepare our data for training 
def create_training_data(data):
    # Retrieve the silhouette information 
    data_shape = np.ravel(data['chroma_stft']).shape
    # Create an empty list to store training data
    X = np.zeros(data_shape[0])
    # Loop through the data and retrieve relevant info 
    for i in range(len(data['energy'])): 
        X[i][0] = data['energy'][i]
        X[i][1] = data['chroma_stft'][i]
        X[i][2] = data['spectral_contrast'][i]
        X[i][3] = data['tonnetz'][i]
        X[i][4] = data['tempo']
    return X 

# Build Model
# This function will build our ML model
def build_model(X_train, y_train):
    from sklearn.ensemble import RandomForestClassifier
    # Create the model
    model = RandomForestClassifier(n_estimators=100)
    # Train the model using X_train and y_train
    model.fit(X_train, y_train) 
    # Return the model
    return model 

# Generate Music
# This function will generate the music using ML
def generate_music(model, X):
    # Generate predictions using X
    pred = model.predict(X) 
    # Return the predictions
    return pred

# Create Music 
# This function will create the music according to the tempo and user-given parameters
def create_music(pred, sample_rate, tempo, rhythm, tone):
    # Import necessary libraries 
    from mido import Message, MidiFile
    from mido.midifiles import MidiTrack
    from mido.messages import note_on, note_off 
    # Create a new MIDI file 
    mid = MidiFile()
    # Create a track 
    track = MidiTrack() 
    mid.tracks.append(track)
    # Set the tempo 
    track.append(Message("set_tempo", tempo=tempo))
    # Set the time signature 
    track.append(Message("time_signature", numerator=rhythm))
    # Iterate through the predictions and create the notes 
    for i in range(len(pred)):
        duration = pred[i][0] 
        # Note on message 
        track.append(Message("note_on", note=pred[i][1], velocity=127, time=0)) 
        # Note off message 
        track.append(Message("note_off", note=pred[i][1], velocity=127, time=duration)) 
    # Set the tone according to the user given parameters 
    track.append(Message("control_change", data=[2,tone]))
    # Save the MIDI file 
    mid.save("generated_music.mid")

# Main function 
# This function will execute the program and generate the music
def main():
    # Gather the file and retrieve the data 
    file = "audio.mp3"
    data = file_retreival(file)
    # Create the training data 
    X_train = create_training_data(data)
    # Create the labels (in this case, empty labels as we just want to generate music)
    y_train = np.zeros(X_train.shape[0])
    # Build the model 
    model = build_model(X_train, y_train)
    # Generate the music predictions 
    music_pred = generate_music(model, X_train)
    # Create the music 
    create_music(music_pred, data['sample_rate'], tempo, rhythm, tone)
    
# Execute the main function 
main()
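
I think the immediate problem is in create_training_data: X = np.zeros(data_shape[0]) allocates a 1-D array, but the loop then assigns into it as if it were 2-D. Below is a rewrite I'm considering (not sure it's right). It builds one row per analysis frame with five summary features and assumes the librosa feature arrays share roughly the same number of frames on their last axis:

import numpy as np

def create_training_data(data):
    # One row per analysis frame, five summary features per row
    n_frames = min(feat.shape[-1] for feat in
                   (data['energy'], data['chroma_stft'],
                    data['spectral_contrast'], data['tonnetz']))
    X = np.zeros((n_frames, 5))
    for i in range(n_frames):
        X[i, 0] = data['energy'][0, i]                    # rms comes back with shape (1, n_frames)
        X[i, 1] = data['chroma_stft'][:, i].mean()        # average the 12 chroma bins
        X[i, 2] = data['spectral_contrast'][:, i].mean()  # average the contrast bands
        X[i, 3] = data['tonnetz'][:, i].mean()            # average the 6 tonnetz dimensions
        X[i, 4] = np.atleast_1d(data['tempo'])[0]         # tempo is returned as a 1-element array
    return X

I realise that with y_train all zeros the classifier can only ever predict zeros, so the labels probably need rethinking too, but I'd like to at least get past the shape error first.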
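Two other things I'm unsure about in file_retreival: depending on the librosa version installed, the feature calls may need keyword arguments (recent releases made y and sr keyword-only), and main() later reads data['sample_rate'] even though the function never stores it, which looks like it would raise a KeyError. A version with both changes (just a sketch):

import librosa

def file_retreival(file):
    audio, sample_rate = librosa.load(file)
    results = {
        "sample_rate": sample_rate,  # main() looks this key up later
        "tempo": librosa.beat.tempo(y=audio, sr=sample_rate),
        "energy": librosa.feature.rms(y=audio),
        "chroma_stft": librosa.feature.chroma_stft(y=audio, sr=sample_rate),
        "spectral_contrast": librosa.feature.spectral_contrast(y=audio, sr=sample_rate),
        "tonnetz": librosa.feature.tonnetz(y=librosa.effects.harmonic(y=audio), sr=sample_rate)
    }
    return results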

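Finally, I suspect create_music wouldn't run even with the data fixed: in mido, "set_tempo" and "time_signature" are MetaMessage types (and the tempo there is microseconds per beat, not BPM), note_on/note_off aren't importable from mido.messages, and "control_change" takes control= and value= integers rather than data=[2, tone]. A minimal version I'm considering, which assumes each prediction row gives a (duration_in_ticks, note_number) pair:

from mido import Message, MetaMessage, MidiFile, MidiTrack, bpm2tempo

def create_music(pred, tempo_bpm, numerator, denominator):
    mid = MidiFile()
    track = MidiTrack()
    mid.tracks.append(track)

    # Tempo and time signature are meta messages; bpm2tempo converts BPM
    # into the microseconds-per-beat value MIDI expects
    track.append(MetaMessage("set_tempo", tempo=bpm2tempo(tempo_bpm)))
    track.append(MetaMessage("time_signature", numerator=numerator, denominator=denominator))

    for duration, note in pred:
        note = int(note) % 128  # MIDI note numbers must be 0-127
        track.append(Message("note_on", note=note, velocity=127, time=0))
        track.append(Message("note_off", note=note, velocity=127, time=int(duration)))

    mid.save("generated_music.mid")

I'd parse the rhythm string first with numerator, denominator = map(int, rhythm.split("/")), and I haven't worked out how to map the tone string onto a control change yet, so I left that part out. Does this approach make sense, or is there a better way to structure it?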