I am developing app like converting speech into text, here is my code, I have used Apple's framework - speech.
- (void)viewDidLoad {
[super viewDidLoad];
[self.start_btn setEnabled:NO];
}
-(void)viewDidAppear:(BOOL)animated
{
self.speechRecognizer = [[SFSpeechRecognizer alloc]initWithLocale:[NSLocale localeWithLocaleIdentifier:@"en-US"]];
self.speechRecognizer.delegate = self;
[SFSpeechRecognizer requestAuthorization:^(SFSpeechRecognizerAuthorizationStatus authStatus) {
switch (authStatus) {
case SFSpeechRecognizerAuthorizationStatusAuthorized:
//User gave access to speech recognition
NSLog(@"Authorized");
[self.start_btn setEnabled:YES];
break;
case SFSpeechRecognizerAuthorizationStatusDenied:
//User denied access to speech recognition
NSLog(@"AuthorizationStatusDenied");
[self.start_btn setEnabled:NO];
break;
case SFSpeechRecognizerAuthorizationStatusRestricted:
//Speech recognition restricted on this device
NSLog(@"AuthorizationStatusRestricted");
[self.start_btn setEnabled:NO];
break;
case SFSpeechRecognizerAuthorizationStatusNotDetermined:
//Speech recognition not yet authorized
[self.start_btn setEnabled:NO];
break;
default:
NSLog(@"Default");
break;
}
}];
}
- (void)didReceiveMemoryWarning {
[super didReceiveMemoryWarning];
// Dispose of any resources that can be recreated.
}
-(void)start_record{
//CAncel the previous task if it's running
NSError * outError;
AVAudioSession *audioSession = [AVAudioSession sharedInstance];
[audioSession setCategory:AVAudioSessionCategoryRecord error:&outError];
[audioSession setMode:AVAudioSessionModeMeasurement error:&outError];
[audioSession setActive:YES withOptions:AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation error:&outError];
SFSpeechAudioBufferRecognitionRequest *request2 = [[SFSpeechAudioBufferRecognitionRequest alloc] init];
self.audioEngine = [[AVAudioEngine alloc]init];
AVAudioInputNode *inputNode = self.audioEngine.inputNode;
if (request2 == nil) {
NSLog(@"Unable to created a SFSpeechAudioBufferRecognitionRequest object");
}
if (inputNode == nil) {
NSLog(@"Audio engine has no input node ");
}
//configure request so that results are returned before audio recording is finished
request2.shouldReportPartialResults = YES;
self.recognitionTask = [self.speechRecognizer recognitionTaskWithRequest:request2 resultHandler:^(SFSpeechRecognitionResult * result, NSError * error1) {
BOOL isFinal = false;
if(result != nil)
{
self.speech_txt.text = result.bestTranscription.formattedString;
// NSLog(@" the result:%@",result.bestTranscription.formattedString);
NSLog(@"%@",self.speech_txt.text);
isFinal = result.isFinal;
}
if (error1 != nil || isFinal) {
[self.audioEngine stop];
[inputNode removeTapOnBus:0];
// [self.audioEngine stop];
// [self.recognitionRequest endAudio];
self.recognitionRequest = nil;
self.recognitionTask = nil;
[self.start_btn setEnabled:YES];
[self.start_btn setTitle:@"Start Recording" forState:UIControlStateNormal];
}
}];
AVAudioFormat *recordingFormat = [inputNode outputFormatForBus:0];
[inputNode installTapOnBus:0 bufferSize:1024 format:recordingFormat block:^(AVAudioPCMBuffer * _Nonnull buffer, AVAudioTime * _Nonnull when){
[self.recognitionRequest appendAudioPCMBuffer:buffer];
}];
NSError *error1;
[self.audioEngine prepare];
[self.audioEngine startAndReturnError:&error1];
self.speech_txt.text = @"(Go ahead , I'm listening)";
}
//MARK: SFSpeechRecognizerDelegate
-(void)speechRecognizer:(SFSpeechRecognizer *)speechRecognizer availabilityDidChange:(BOOL)available
{
if (available) {
[self.start_btn setEnabled:YES];
[self.start_btn setTitle:@"Start Recording" forState:UIControlStateNormal];
}
else{
[self.start_btn setEnabled:NO];
[self.start_btn setTitle:@"Recognition not available" forState:UIControlStateDisabled];
}}
- (IBAction)start_btn_action:(id)sender {
if (self.audioEngine.isRunning) {
[self.audioEngine stop];
[self.recognitionRequest endAudio];
[self.start_btn setEnabled:NO];
[self.start_btn setTitle:@"Stopping" forState:UIControlStateDisabled];
}
else{
[self start_record];
[self.start_btn setTitle:@"Stop Recording" forState:@""];
}}
I have implemented this code, while running it shows error like:
[Utility] +[AFAggregator logDictationFailedWithError:] Error Domain=kAFAssistantErrorDomain Code=203 "Timeout" UserInfo={NSLocalizedDescription=Timeout, NSUnderlyingError=0x170250140 {Error Domain=SiriSpeechErrorDomain Code=100 "(null)"}}
How can I resolve this?