No sound from microphone until volume is adjusted in WebRTC application
Expected Behavior: When the microphone is turned on, I expect to hear sound immediately without needing to adjust the volume control.
Actual Behavior: Currently, there's no sound from the microphone until the volume control is adjusted. After adjusting the volume, the sound becomes audible as expected.
This issue occurs consistently across different browsers. I've ensured that microphone permissions are granted and that the browser supports the necessary Web Audio API features.
useEffect(() => {
for (const peer of peers.values()) {
const audioConsumer = peer.getConsumerByType(SERVICE_TYPE.VOICE);
if (audioConsumer) {
if (!gainNodes.current[peer.id.toString()]) {
peer.resume(SERVICE_TYPE.VOICE);
const ms = new MediaStream();
ms.addTrack(audioConsumer.track);
const audioContext = new (window.AudioContext || window.webkitAudioContext)();
const gainNode = audioContext.createGain() as GainNode;
gainNode.gain.value = volume[peer.id.toString()] || 1;
const source = audioContext.createMediaStreamSource(ms);
source.connect(gainNode);
gainNode.connect(audioContext.destination);
gainNodes.current[peer.id.toString()] = gainNode;
}
}
}
return () => {
for (const key in gainNodes.current) {
if (gainNodes.current.hasOwnProperty(key)) {
const gainNode = gainNodes.current[key];
if (gainNode) {
gainNode.disconnect();
}
}
}
gainNodes.current = {};
};
}, [peers, volume]);
useEffect(() => {
const handleVolumeChange = () => {
const mic = navigator.mediaDevices.getUserMedia({ audio: true });
mic.then(stream => {
if (stream.getAudioTracks().length === 0) {
setVolume(prev => {
const newVolume = { ...prev };
for (const peer of peers.values()) {
newVolume[peer.id.toString()] = 1;
}
return newVolume;
});
audioRefs.current.forEach(audio => {
if (audio) {
audio.volume = 1;
}
});
} else {
setVolume(prev => {
const newVolume = { ...prev };
for (const peer of peers.values()) {
if (newVolume[peer.id.toString()] === 0) {
newVolume[peer.id.toString()] = 1;
}
}
return newVolume;
});
audioRefs.current.forEach(audio => {
if (audio) {
audio.volume = 1;
}
});
}
stream.getTracks().forEach(track => {
track.onmute = () => {
setVolume(prev => {
const newVolume = { ...prev };
for (const peer of peers.values()) {
newVolume[peer.id.toString()] = 0;
}
return newVolume;
});
audioRefs.current.forEach(audio => {
if (audio) {
audio.volume = 0;
}
});
};
track.onunmute = () => {
setVolume(prev => {
const newVolume = { ...prev };
for (const peer of peers.values()) {
if (newVolume[peer.id.toString()] === 0) {
newVolume[peer.id.toString()] = 1;
}
}
return newVolume;
});
audioRefs.current.forEach(audio => {
if (audio) {
audio.volume = 1;
}
});
};
});
});
};
handleVolumeChange();
}, [peers]);
const handleVolumeChange = (e: React.ChangeEvent<HTMLInputElement>, peerId: string) => {
const newVolume = parseFloat(e.target.value);
setVolume(prevVolume => ({
...prevVolume,
[peerId]: newVolume === 0 ? 0.0001 : newVolume,
}));
const gainNode = gainNodes.current[peerId];
if (gainNode) {
gainNode.gain.value = newVolume === 0 ? 0.0001 : newVolume;
}
};
return (
<div className={classes.root}>
{Array.from(peers.values()).map((peer, i) => {
const audioConsumer = peer.getConsumerByType(SERVICE_TYPE.VOICE);
if (audioConsumer) {
return (
<div key={i}>
<input
type="range"
min="0"
max="1"
step="0.01"
value={volume[peer.id.toString()] || 1}
onChange={(e) => handleVolumeChange(e, peer.id.toString())}
ref={(input) => {
if (input) {
input.onmouseup = () => input.blur();
}
}}
/>
</div>
);
}
return null;
})}
</div>
);
The part of your code which enables the microphone is behind the handleVolumeChange() event under the second useEffect() section. It should be moved elsewhere so that it is not dependent on the user changing the volume first. Here's an idea of how this might work:
Basically, move your
const mic = navigator...code outside of theconst handleVolumeChange = () => {...portion. Hopefully this helps point you in the right direction.