I am making a voice assistant and I am trying to add a hotword detection feature to it. I am following the tutorial by 'Logical Spot' and using the lsHotword module. I trained the hotword model on my voice with the HTrainer program, then edited the ls program to point it at the trained model's directory. After that I made a run program, but when I try to run it I get an error.

The programs that I used are as follows:

The run program (run.py):

import os
from lsHotword import ls

while True:
    print("Speak Claire ")
    ls.lsHotword_loop()
    os.system("python claire.py")
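For reference, a quick check like the following (not part of the tutorial, just a sketch) shows which installed copy of lsHotword actually gets imported and whether the model file configured in ls.py exists:

import os
import lsHotword

print(lsHotword.__file__)      # which site-packages copy is actually loaded
Mpath = "C:\\Users\\admin\\AppData\\Local\\Programs\\Python\\Python38\\Lib\\site-packages\\lsHotword\\model.h5"
print(os.path.exists(Mpath))   # True if the trained model is really at this path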

The ls program (ls.py):

Mpath = "C:\\Users\\admin\\AppData\\Local\\Programs\\Python\\Python38\\Lib\\site-packages\\lsHotword\\model.h5"
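# Note: load_model() further down reads the trained model from this path, so the
# .h5 file produced by HTrainer must actually exist at this location.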
import pathlib                            # for the chime file; remove if the chime is not used
Here = pathlib.Path(__file__).parent
import numpy as np
import time
from pydub import AudioSegment
from pydub.playback import play
import random
import sys
import io
import os

from scipy.io.wavfile import write
from lsHotword import hotword as ht

chime = AudioSegment.from_wav("C:\\Users\\admin\\AppData\\Local\\Programs\\Python\\Python38\\Lib\\site-packages\\lsHotword\\chime.wav")
# Use 1101 for 2sec input audio
Tx = 5511 # The number of time steps input to the model from the spectrogram
n_freq = 101 # Number of frequencies input to the model at each time step of the spectrogram
Ty = 1375  # The number of time steps in the output of our model
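# For reference: these numbers correspond to the 10-second setup used below
# (feed_duration = 10), i.e. a 10 s clip gives a spectrogram of 5511 time steps
# x 101 frequencies, and the model emits 1375 sigmoid outputs, one per output step.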

from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.models import Model, load_model, Sequential
from tensorflow.keras.layers import Dense, Activation, Dropout, Input, Masking, TimeDistributed, LSTM, Conv1D
from tensorflow.keras.layers import GRU, Bidirectional, BatchNormalization, Reshape
from tensorflow.keras.optimizers import Adam

# GRADED FUNCTION: model

def model(input_shape):
    """
    Function creating the model's graph in Keras.
    
    Argument:
    input_shape -- shape of the model's input data (using Keras conventions)

    Returns:
    model -- Keras model instance
    """
    
    X_input = Input(shape = input_shape)
    
    ### START CODE HERE ###
    
    # Step 1: CONV layer (≈4 lines)
    X = Conv1D(filters=196, kernel_size=15, strides=4)(X_input)                                 # CONV1D
    X = BatchNormalization()(X)                                 # Batch normalization
    X = Activation('relu')(X)                                 # ReLu activation
    X = Dropout(0.8)(X)                                 # dropout (use 0.8)

    # Step 2: First GRU Layer (≈4 lines)
    X = GRU(units=128, return_sequences=True)(X)                                 # GRU (use 128 units and return the sequences)
    X = Dropout(0.8)(X)                                 # dropout (use 0.8)
    X = BatchNormalization()(X)                                 # Batch normalization
    
    # Step 3: Second GRU Layer (≈4 lines)
    X = GRU(units=128, return_sequences=True)(X)                                 # GRU (use 128 units and return the sequences)
    X = Dropout(0.8)(X)                                 # dropout (use 0.8)
    X = BatchNormalization()(X)                                 # Batch normalization
    X = Dropout(0.8)(X)                                 # dropout (use 0.8)
    
    # Step 4: Time-distributed dense layer (≈1 line)
    X = TimeDistributed(Dense(1, activation = "sigmoid"))(X) # time distributed  (sigmoid)

    ### END CODE HERE ###

    model = Model(inputs = X_input, outputs = X)
    
    return model

model = model(input_shape = (Tx, n_freq))
# model.summary()

opt = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, decay=0.01)
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=["accuracy"])


model = load_model(Mpath)

def detect_triggerword_spectrum(x):
    """
    Function to predict the location of the trigger word.
    
    Argument:
    x -- spectrum of shape (freqs, Tx)
    i.e. (number of frequencies, number of time steps)

    Returns:
    predictions -- flattened numpy array to shape (number of output time steps)
    """
    # the spectrogram comes in as (freqs, Tx); the model expects (Tx, freqs)
    x  = x.swapaxes(0,1)
    x = np.expand_dims(x, axis=0)
    predictions = model.predict(x)
    return predictions.reshape(-1)

def has_new_triggerword(predictions, chunk_duration, feed_duration, threshold=0.5):
    """
    Function to detect new trigger word in the latest chunk of input audio.
    It looks for a rising edge in the part of the predictions that belongs
    to the latest chunk of input audio.
    
    Argument:
    predictions -- predicted labels from model
    chunk_duration -- duration of one chunk, in seconds
    feed_duration -- duration of the model input, in seconds
    threshold -- probability threshold above which a prediction counts as positive

    Returns:
    True if new trigger word detected in the latest chunk
    """
    predictions = predictions > threshold
    chunk_predictions_samples = int(len(predictions) * chunk_duration / feed_duration)
    chunk_predictions = predictions[-chunk_predictions_samples:]
    level = chunk_predictions[0]
    for pred in chunk_predictions:
        if pred > level:
            return True
        else:
            level = pred
    return False
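# Illustration: with feed_duration = 10 s and chunk_duration = 0.5 s, only the last
# 1/20th of the 1375 predictions (int(1375 * 0.5 / 10) = 68 values) is scanned, and a
# 0 -> 1 transition inside that window means the trigger word ended in the newest chunk.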


chunk_duration = 0.5 # Each read length in seconds from mic.
fs = 44100 # sampling rate for mic
chunk_samples = int(fs * chunk_duration) # Each read length in number of samples.

# Duration of each model input in seconds; needs to be an integer multiple of chunk_duration
feed_duration = 10
feed_samples = int(fs * feed_duration)

assert feed_duration/chunk_duration == int(feed_duration/chunk_duration)

def get_audio_input_stream(callback):
    stream = pyaudio.PyAudio().open(
        format=pyaudio.paInt16,
        channels=1,
        rate=fs,
        input=True,
        frames_per_buffer=chunk_samples,
        input_device_index=0,
        stream_callback=callback)
    return stream


import pyaudio
from queue import Queue
import sys
import time

run = True
silence_threshold = 100
# Queue to communicate between the audio callback and main thread
q = Queue()

# Data buffer for the input waveform
data = np.zeros(feed_samples, dtype='int16')

def callback(in_data, frame_count, time_info, status):
    global run, data, silence_threshold    
      
    data0 = np.frombuffer(in_data, dtype='int16')

    if np.abs(data0).mean() < silence_threshold:
        return (in_data, pyaudio.paContinue)

    data = np.append(data,data0)    
    if len(data) > feed_samples:
        data = data[-feed_samples:]
        q.put(data)                            # hand the full buffer to the main thread via the queue
    return (in_data, pyaudio.paContinue)
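# In short: the callback keeps a rolling buffer of the last feed_samples samples,
# skips chunks whose mean amplitude is below silence_threshold, and pushes the full
# 10-second buffer onto the queue for the main loop to run the model on.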

def lsHotword_loop():
    global run
    run = True
    stream = get_audio_input_stream(callback)
    stream.start_stream()
    
    try:
        while run:
            data = q.get()
            spectrum = ht.get_spectrogram(data)
            preds = detect_triggerword_spectrum(spectrum)
            new_trigger = has_new_triggerword(preds, chunk_duration, feed_duration)
            if new_trigger:
                sys.stdout.write('Activated ')
                run = False
                play(chime)
                stream.stop_stream()
                stream.close()
                break
    except (KeyboardInterrupt, SystemExit):
        stream.stop_stream()
        stream.close()
        run = False

The error:

C:\Users\admin\Documents\Downloads\dist>python run.py
C:\Users\admin\AppData\Local\Programs\Python\Python37\lib\site-packages\pydub\utils.py:170: RuntimeWarning: Couldn't find ffmpeg or avconv - defaulting to ffmpeg, but may not work
  warn("Couldn't find ffmpeg or avconv - defaulting to ffmpeg, but may not work", RuntimeWarning)
C:\Users\admin\AppData\Local\Programs\Python\Python37\lib\site-packages\pydub\utils.py:184: RuntimeWarning: Couldn't find ffplay or avplay - defaulting to ffplay, but may not work
  warn("Couldn't find ffplay or avplay - defaulting to ffplay, but may not work", RuntimeWarning)
2020-11-24 10:26:03.378533: W tensorflow/stream_executor/platform/default/dso_loader.cc:59] Could not load dynamic library 'cudart64_101.dll'; dlerror: cudart64_101.dll not found
2020-11-24 10:26:03.378796: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.
2020-11-24 10:26:06.486018: W tensorflow/stream_executor/platform/default/dso_loader.cc:59] Could not load dynamic library 'nvcuda.dll'; dlerror: nvcuda.dll not found
2020-11-24 10:26:06.486267: W tensorflow/stream_executor/cuda/cuda_driver.cc:312] failed call to cuInit: UNKNOWN ERROR (303)
2020-11-24 10:26:06.493459: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:169] retrieving CUDA diagnostic information for host: DESKTOP-OOAD4LH
2020-11-24 10:26:06.493840: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:176] hostname: DESKTOP-OOAD4LH
2020-11-24 10:26:06.495896: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN)to use the following CPU instructions in performance-critical operations:  AVX2
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2020-11-24 10:26:06.507592: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x23ffb70d3a0 initialized for platform Host (this does not guarantee that XLA will be used). Devices:
2020-11-24 10:26:06.507800: I tensorflow/compiler/xla/service/service.cc:176]   StreamExecutor device (0): Host, Default Version
Traceback (most recent call last):
  File "run.py", line 2, in <module>
    from lsHotword import ls
  File "C:\Users\admin\AppData\Local\Programs\Python\Python37\lib\site-packages\lsHotword\ls.py", line 103, in <module>
    model = load_model(Mpath)
  File "C:\Users\admin\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\keras\saving\save.py", line 186, in load_model
    loader_impl.parse_saved_model(filepath)
  File "C:\Users\admin\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\saved_model\loader_impl.py", line 113, in parse_saved_model
    constants.SAVED_MODEL_FILENAME_PB))
OSError: SavedModel file does not exist at: type here you model directory/{saved_model.pbtxt|saved_model.pb}