I'm trying to create an interactive real-time audio application where the user can control a few key audio processing parameters through a GUI. At present I'm using PyAudio for the real-time processing and streaming of the audio data, and matplotlib widgets for the GUI. The GUI and the audio streaming each run in their own Python process via the multiprocessing package.
Is there a way to trigger the start of the PyAudio stream from the GUI running in the other process? Below is how I imagine the code might look, but it obviously doesn't work: the part I can't figure out is how to start and stop the PyAudio stream from the other child process. (I've put a stripped-down sketch of the only workaround I can think of after the code.)
import time
from multiprocessing import Process
from multiprocessing.sharedctypes import Value

import pyaudio
import wave
import matplotlib.pyplot as plt
from matplotlib.widgets import Button, Slider
import numpy as np
def GUI(process_state: Value, gain: Value, pause: Value):
    fig = plt.figure()
    fig.set_size_inches(10, 10)
    ax1: plt.Axes = fig.add_subplot(111)

    def play_click(event):
        stream.start_stream()   # <-- this is what I can't do: `stream` lives in the other process

    def pause_click(event):
        stream.stop_stream()    # <-- same problem here

    def onchange(value):
        gain.value = value      # updating the shared gain value works fine

    slideraxis = fig.add_axes([0.1, 0.1, 0.4, 0.02])
    slider = Slider(slideraxis, label='gain', valmin=0, valmax=1.0,
                    valinit=gain.value, valstep=0.01)
    slider.on_changed(onchange)

    pause_btnaxis = fig.add_axes([0.5, 0.1, 0.1, 0.1])
    pause_btn = Button(pause_btnaxis, 'Pause audio')
    pause_btn.on_clicked(pause_click)

    play_btnaxis = fig.add_axes([0.6, 0.1, 0.1, 0.1])
    play_btn = Button(play_btnaxis, 'Play audio')
    play_btn.on_clicked(play_click)

    plt.show()
def stream_audio(process_state: Value, gain: Value, pause: Value):
    # open the file to be streamed
    input = r'path/to/audio'
    wf = wave.open(input, 'rb')
    chunk = 1024

    # create the PyAudio instance
    p = pyaudio.PyAudio()

    # callback that PyAudio invokes whenever it needs the next chunk of audio
    def callback(in_data, frame_count, time_info, flag):
        # read the current value of the shared gain parameter
        gain_val = gain.value
        if wf:
            # streaming from file: read the next chunk as interleaved stereo int16 samples
            audio = wf.readframes(chunk)
            audio = np.frombuffer(audio, dtype=np.int16).astype(np.float32).reshape(-1, 2)
        else:
            # streaming from the input device instead
            audio = np.frombuffer(in_data, dtype=np.int16).astype(np.float32).reshape(-1, 2)
        audio_data = audio * gain_val
        return audio_data.astype(np.int16).tobytes(), pyaudio.paContinue

    # open a Stream object to write the wave file to
    # output=True means the sound will be played rather than recorded
    stream = p.open(format=pyaudio.paInt16, channels=wf.getnchannels(),
                    rate=wf.getframerate(), stream_callback=callback,
                    start=False, output=True)

    stream.start_stream()
    while stream.is_active() and process_state.value:
        time.sleep(5)
def main():
    process_state = Value('b')
    process_state.value = True

    gain_value = Value('f')
    gain_value.value = 1.0

    # shared flag the GUI could set, but I don't know how to make the
    # streaming process act on it to start/stop the stream
    pause = Value('b')
    pause.value = False

    processes = [
        Process(target=GUI,
                args=(process_state,
                      gain_value,
                      pause)),
        Process(target=stream_audio,
                args=(process_state,
                      gain_value,
                      pause))
    ]

    for process in processes:
        process.start()
    for process in processes:
        process.join()


if __name__ == '__main__':
    main()
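
The only direction I've been able to come up with is to never touch the stream object from the GUI at all, and instead have the GUI flip shared flags that the audio process polls and acts on (the same way I already share the gain value). Below is a stripped-down sketch of that idea with no PyAudio in it; the `playing`/`running` flags and the two toy functions are just placeholders I made up to illustrate the polling. Is this the right way to do it, or is there a cleaner mechanism (a multiprocessing.Event, a Queue, etc.) for triggering start/stop across processes?

import time
from multiprocessing import Process, Value

def gui(playing: Value, running: Value):
    # stand-in for the matplotlib button callbacks
    time.sleep(1)
    playing.value = True     # "Play audio" clicked
    time.sleep(1)
    playing.value = False    # "Pause audio" clicked
    time.sleep(1)
    running.value = False    # GUI window closed

def audio(playing: Value, running: Value):
    # stand-in for the process that owns the PyAudio stream
    started = False
    while running.value:
        if playing.value and not started:
            print('would call stream.start_stream() here')
            started = True
        elif not playing.value and started:
            print('would call stream.stop_stream() here')
            started = False
        time.sleep(0.05)     # poll the shared flags

if __name__ == '__main__':
    playing = Value('b', False)
    running = Value('b', True)
    procs = [Process(target=gui, args=(playing, running)),
             Process(target=audio, args=(playing, running))]
    for p in procs:
        p.start()
    for p in procs:
        p.join()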