I am trying to send audio packets to a VoIP server using DatagramPacket; the class below handles the audio streaming. Every time I call audioRecord.read() I get AudioSystem.INVALID_OPERATION or AudioSystem.BAD_VALUE, but only on Android 11 and above.
The new AudioRecord.Builder() instance reports STATE_INITIALIZED.
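Here is the record path boiled down to the essentials (same constants as in the full class below, and assuming the RECORD_AUDIO runtime permission is already granted); this is just a sketch of what the class does, not a separate repro:

@SuppressLint("MissingPermission")
private void recordOnce() {
    AudioFormat format = new AudioFormat.Builder()
            .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
            .setSampleRate(16000)
            .setChannelMask(AudioFormat.CHANNEL_IN_MONO)
            .build();
    AudioRecord recorder = new AudioRecord.Builder()
            .setAudioSource(MediaRecorder.AudioSource.MIC)
            .setAudioFormat(format)
            .setBufferSizeInBytes(2 * 512)
            .build();
    Log.d("AudioStreaming", "state = " + recorder.getState()); // reports STATE_INITIALIZED
    recorder.startRecording();
    short[] buffer = new short[512];
    int read = recorder.read(buffer, 0, buffer.length, AudioRecord.READ_BLOCKING);
    Log.d("AudioStreaming", "read = " + read); // ERROR_INVALID_OPERATION (-3) or ERROR_BAD_VALUE (-2) on Android 11+
    recorder.stop();
    recorder.release();
}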
import android.annotation.SuppressLint;
import android.media.AudioAttributes;
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioRecord;
import android.media.AudioTrack;
import android.media.MediaRecorder;
import android.media.audiofx.AcousticEchoCanceler;
import android.media.audiofx.NoiseSuppressor;
import android.os.Build;
import android.os.Handler;
import android.os.Looper;
import android.os.Message;
import android.os.Process;
import android.util.Log;
import com.google.gson.Gson;
import Constants;
import HelperClass.AudioCodec.MuLawDecoder;
import HelperClass.AudioCodec.MuLawEncoder;
import HelperClass.LiveChat;
import HelperClass.NetUtils.IPEndPoint;
import java.io.IOException;
import java.net.DatagramPacket;
import java.net.DatagramSocket;
import java.util.Arrays;
public class AudioStreaming {
private static final String TAG = Constants.AUDIO_STREAMING;
private DatagramSocket socket;
private IPEndPoint master;
private boolean isReceiving = true;
private boolean isSending = true;
//parameter for audio
private static final int SAMPLE_RATE = 16000;
private static final int INPUT_CHANNEL = AudioFormat.CHANNEL_IN_MONO;
private static final int OUTPUT_CHANNEL = AudioFormat.CHANNEL_OUT_MONO;
private static final int FORMAT = AudioFormat.ENCODING_PCM_16BIT;
private static final int BUFFER_UNIT_LENGTH = 512;
private static final int INPUT_BUFFER_SIZE =
(AudioRecord.getMinBufferSize(SAMPLE_RATE, INPUT_CHANNEL, FORMAT) / BUFFER_UNIT_LENGTH + 1) * BUFFER_UNIT_LENGTH;
private static final int OUTPUT_BUFFER_SIZE =
(AudioTrack.getMinBufferSize(SAMPLE_RATE, OUTPUT_CHANNEL, FORMAT) / BUFFER_UNIT_LENGTH) * BUFFER_UNIT_LENGTH;
private AudioRecord audioRecord;
private AudioTrack audioTrack;
private LiveChat chatClient;
private short[][] inputBuffers;
Handler mHandler;
private int inputBufferNums;
private int inputBuffersIndex;
private byte[][] outputBuffers;
private int outputBufferNums;
private int outPutBuffersIndex;
public AudioStreaming(DatagramSocket socket, IPEndPoint master) {
this.socket = socket;
this.master = master;
}
public void startAudioStream() {
//mic --> UDP
inputBufferNums = 10;
inputBuffers = new short[inputBufferNums][INPUT_BUFFER_SIZE];
inputBuffersIndex = 0;
prepareInputAudio();
//UDP --> speaker
outputBufferNums = 10;
outputBuffers = new byte[outputBufferNums][BUFFER_UNIT_LENGTH];
outPutBuffersIndex = 0;
prepareOutputAudio();
System.out.println("Input buffer size: " + INPUT_BUFFER_SIZE);
System.out.println("Output buffer size: " + OUTPUT_BUFFER_SIZE);
}
public void stopAudioStream() {
isReceiving = false;
isSending = false;
}
//walkie-talkie communication functions
public void talk() {
if (audioRecord != null && audioRecord.getState() == AudioRecord.STATE_INITIALIZED) {
audioTrack.setVolume((float) 0.0);
audioRecord.startRecording();
}
}
public void stopTalk() {
if (audioRecord != null && audioRecord.getState() == AudioRecord.STATE_INITIALIZED) {
audioRecord.stop();
audioTrack.setVolume((float) 1.0);
}
}
//mic --> UDP
private void prepareInputAudio() {
// heartbeat thread
new Thread(new Runnable() {
@Override
public void run() {
try {
DatagramPacket sendingPacket;
byte[] heartBeat = new byte[]{'2'};
while (isSending) {
// while not talking, send a heartbeat
sendingPacket = new DatagramPacket(heartBeat, 1, master.getIpAddress(), master.getPort());
socket.send(sendingPacket);
Thread.sleep(1500);
}
} catch (Exception e) {
Log.d(TAG, e + "");
}
}
}).start();
// voice send out thread
Thread inputAudioStream = new Thread(new Runnable() {
@SuppressLint("MissingPermission")
@Override
public void run() {
try {
Process.setThreadPriority(Process.THREAD_PRIORITY_URGENT_AUDIO);
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
AudioFormat audioFormat = new AudioFormat.Builder()
.setEncoding(FORMAT)
.setSampleRate(SAMPLE_RATE)
.setChannelMask(INPUT_CHANNEL)
.build();
audioRecord = new AudioRecord.Builder()
.setAudioSource(MediaRecorder.AudioSource.MIC)
.setAudioFormat(audioFormat)
.setBufferSizeInBytes(2 * BUFFER_UNIT_LENGTH)
.build();
Log.e("audioRecordStateLogs", " : " + audioRecord.getState());
} else {
audioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC, SAMPLE_RATE, INPUT_CHANNEL, FORMAT, INPUT_BUFFER_SIZE);
}
//acoustic echo canceller
int audioSessionId = audioRecord.getAudioSessionId();
if (AcousticEchoCanceler.isAvailable()) {
AcousticEchoCanceler aec = AcousticEchoCanceler.create(audioSessionId);
if (aec != null) aec.setEnabled(true);
}
if (NoiseSuppressor.isAvailable()) {
NoiseSuppressor ns = NoiseSuppressor.create(audioSessionId);
if (ns != null) ns.setEnabled(true);
}
//start to send out sound data packets
DatagramPacket sendingPacket;
while (isSending) {
int read;
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
// blocking read straight into the current input buffer
read = audioRecord.read(inputBuffers[inputBuffersIndex], 0, inputBuffers[inputBuffersIndex].length, AudioRecord.READ_BLOCKING);
} else {
read = audioRecord.read(inputBuffers[inputBuffersIndex], 0, inputBuffers[inputBuffersIndex].length);
}
Log.e("readBufferLogsRead", " : " + read);
//read ==> number of frames read in
byte[] toSend;
if (read > 0) { // negative return values are AudioRecord error codes
for (int j = 0; j < read; j++) {
int temp = inputBuffers[inputBuffersIndex][j] * 10;
if (temp >= 32767)
inputBuffers[inputBuffersIndex][j] = 32767;
else if (temp <= -32768)
inputBuffers[inputBuffersIndex][j] = -32768;
else
inputBuffers[inputBuffersIndex][j] = (short) temp;
}
try {
//break the buffer into small packets of 512 bytes; the ESP32 only accepts this size for now
int numOfPackets = inputBuffers[inputBuffersIndex].length / BUFFER_UNIT_LENGTH;
for (int i = 0; i < numOfPackets; i++) {
toSend = MuLawEncoder.MuLawEncode(Arrays.copyOfRange(inputBuffers[inputBuffersIndex], i * BUFFER_UNIT_LENGTH, (i + 1) * BUFFER_UNIT_LENGTH), BUFFER_UNIT_LENGTH);
sendingPacket = new DatagramPacket(toSend, toSend.length, master.getIpAddress(), master.getPort());
socket.send(sendingPacket);
}
inputBuffersIndex = (inputBuffersIndex + 1) % inputBufferNums;
} catch (Exception e) {
e.printStackTrace();
System.out.println(e.getMessage());
}
}
}
audioRecord.stop();
audioRecord.release();
} catch (Exception e) {
e.printStackTrace();
if (audioRecord != null) {
audioRecord.stop();
audioRecord.release();
}
}
}
});
inputAudioStream.start();
}
//UDP --> speaker
private void prepareOutputAudio() {
mHandler = new Handler(Looper.getMainLooper()) {
@Override
public void handleMessage(Message message) {
if (message.what == 0) {
socket.disconnect();
VoiceCallingScreen.FailedCall();
}
}
};
Thread outputAudioStream = new Thread(new Runnable() {
@Override
public void run() {
try {
audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, SAMPLE_RATE, OUTPUT_CHANNEL, FORMAT, OUTPUT_BUFFER_SIZE, AudioTrack.MODE_STREAM);
DatagramPacket receivingPacket;
short[] toWrite;
audioTrack.play();
socket.setSoTimeout(5000);
while (isReceiving) {
receivingPacket = new DatagramPacket(outputBuffers[outPutBuffersIndex], BUFFER_UNIT_LENGTH);
socket.receive(receivingPacket);
Log.d("Rec Packet size", receivingPacket.getLength() + "");
if (receivingPacket.getLength() < BUFFER_UNIT_LENGTH)
continue; // skip the first few packets during device ringing
toWrite = MuLawDecoder.MuLawDecode(outputBuffers[outPutBuffersIndex]);
audioTrack.write(toWrite, 0, toWrite.length);
outPutBuffersIndex = (outPutBuffersIndex + 1) % outputBufferNums;
}
audioTrack.stop();
audioTrack.release();
} catch (IOException e) {
Log.e(TAG, e.toString());
Message message = mHandler.obtainMessage(0, 1, -1, String.format(String.valueOf(R.string.Connection_Failed_From_Device_Side_Disconnecting_Call)));
message.sendToTarget();
}
}
});
outputAudioStream.start();
}
}
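For completeness, the class is driven roughly like this from the calling screen; the socket and master endpoint come from the signalling code, so getMasterEndPoint() below is just a hypothetical stand-in for that:

DatagramSocket socket = new DatagramSocket();
IPEndPoint master = getMasterEndPoint(); // hypothetical helper: the VoIP server endpoint from the signalling layer
AudioStreaming streaming = new AudioStreaming(socket, master);
streaming.startAudioStream(); // starts the mic -> UDP and UDP -> speaker threads
// push-to-talk pressed / released
streaming.talk();     // mutes playback and starts recording
streaming.stopTalk(); // stops recording and restores playback volume
// end of call
streaming.stopAudioStream();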